From ba0861e384341fea535281328b05d0438f160494 Mon Sep 17 00:00:00 2001 From: Yutong Dai Date: Thu, 22 Aug 2024 00:04:54 +0000 Subject: [PATCH] the difference is from resize --- examples/CMakeLists.txt | 1 + examples/xgenmm/CMakeLists.txt | 51 + examples/xgenmm/clip.cpp | 2618 +++++++++++++++++ examples/xgenmm/clip.h | 98 + examples/xgenmm/debug.py | 15 + examples/xgenmm/imgs/image-1d100e9-1.jpg | Bin 0 -> 53303 bytes examples/xgenmm/imgs/image-1d100e9.jpg | Bin 0 -> 64176 bytes ...model_breakdown.ipynb => playground.ipynb} | 237 ++ examples/xgenmm/test_anyres_img.cpp | 530 ++++ examples/xgenmm/xgenmm.cpp | 597 ++++ examples/xgenmm/xgenmm.h | 53 + 11 files changed, 4200 insertions(+) create mode 100644 examples/xgenmm/CMakeLists.txt create mode 100644 examples/xgenmm/clip.cpp create mode 100644 examples/xgenmm/clip.h create mode 100644 examples/xgenmm/debug.py create mode 100644 examples/xgenmm/imgs/image-1d100e9-1.jpg create mode 100644 examples/xgenmm/imgs/image-1d100e9.jpg rename examples/xgenmm/{model_breakdown.ipynb => playground.ipynb} (80%) create mode 100644 examples/xgenmm/test_anyres_img.cpp create mode 100644 examples/xgenmm/xgenmm.cpp create mode 100644 examples/xgenmm/xgenmm.h diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 67b3d2774..f3d30c625 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -39,6 +39,7 @@ else() add_subdirectory(quantize-stats) add_subdirectory(quantize) add_subdirectory(retrieval) + add_subdirectory(xgenmm) if (GGML_RPC) add_subdirectory(rpc) endif() diff --git a/examples/xgenmm/CMakeLists.txt b/examples/xgenmm/CMakeLists.txt new file mode 100644 index 000000000..40b745fb5 --- /dev/null +++ b/examples/xgenmm/CMakeLists.txt @@ -0,0 +1,51 @@ +add_library(xgenmm OBJECT + xgenmm.cpp + xgenmm.h + clip.cpp + clip.h + ) + +target_link_libraries(xgenmm PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT}) + +target_include_directories(xgenmm PUBLIC .) +target_include_directories(xgenmm PUBLIC ../..) 
+target_include_directories(xgenmm PUBLIC ../../common)
+
+target_compile_features(xgenmm PRIVATE cxx_std_11)
+
+add_library(xgenmm_static STATIC $<TARGET_OBJECTS:xgenmm>)
+if (BUILD_SHARED_LIBS)
+    set_target_properties(xgenmm PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(xgenmm PRIVATE LLAMA_SHARED LLAMA_BUILD)
+    add_library(xgenmm_shared SHARED $<TARGET_OBJECTS:xgenmm>)
+    target_link_libraries(xgenmm_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
+    install(TARGETS xgenmm_shared LIBRARY)
+endif()
+
+if (NOT MSVC)
+    target_compile_options(xgenmm PRIVATE -Wno-cast-qual) # stb_image.h
+endif()
+
+if(TARGET BUILD_INFO)
+    add_dependencies(xgenmm BUILD_INFO)
+endif()
+
+
+set(TARGET test_anyres_img)
+add_executable(test_anyres_img test_anyres_img.cpp)
+install(TARGETS test_anyres_img RUNTIME)
+target_link_libraries(test_anyres_img PRIVATE common xgenmm ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(xgenmm PRIVATE cxx_std_11)
+
+
+# not implemented yet
+# set(TARGET xgenmm-cli)
+# add_executable(xgenmm-cli xgenmm-cli.cpp)
+# install(TARGETS xgenmm-cli RUNTIME)
+# target_link_libraries(xgenmm-cli PRIVATE common xgenmm_io xgenmm ${CMAKE_THREAD_LIBS_INIT})
+# target_compile_features(xgenmm PRIVATE cxx_std_11)
+
+# add_library(xgenmm_io OBJECT
+#             xgenmm_io.cpp
+#             )
+# target_link_libraries(xgenmm_io PRIVATE xgenmm ${CMAKE_THREAD_LIBS_INIT})
\ No newline at end of file
diff --git a/examples/xgenmm/clip.cpp b/examples/xgenmm/clip.cpp
new file mode 100644
index 000000000..1afc3e316
--- /dev/null
+++ b/examples/xgenmm/clip.cpp
@@ -0,0 +1,2618 @@
+/*
+08/18/2024 - Yutong - The file is adapted from examples/llava/llava.h in the llama.cpp repository.
+*/
+
+// NOTE: This is modified from clip.cpp only for LLaVA,
+// so there might still be unnecessary artifacts hanging around
+// I'll gradually clean and extend it
+// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch
+#include "clip.h"
+#include "log.h"
+#include "ggml.h"
+#include "ggml-alloc.h"
+#include "ggml-backend.h"
+
+#ifdef GGML_USE_CUDA
+#include "ggml-cuda.h"
+#endif
+
+#ifdef GGML_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#ifdef GGML_USE_CANN
+#include "ggml-cann.h"
+#endif
+
+#define STB_IMAGE_IMPLEMENTATION
+#include "stb_image.h"
+
+#include <cassert>
+#include <cmath>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <map>
+#include <regex>
+#include <stdexcept>
+#include <vector>
+#include <sstream>
+#include <cinttypes>
+#include <limits>
+
+//#define CLIP_DEBUG_FUNCTIONS
+
+// RGB uint8 image
+struct clip_image_u8 {
+    int nx;
+    int ny;
+
+    std::vector<uint8_t> buf;
+};
+
+// RGB float32 image (NHWC)
+// Memory layout: RGBRGBRGB...
+struct clip_image_f32 {
+    int nx;
+    int ny;
+
+    std::vector<float> buf;
+};
+
+static std::string format(const char * fmt, ...)
{ + va_list ap; + va_list ap2; + va_start(ap, fmt); + va_copy(ap2, ap); + int size = vsnprintf(NULL, 0, fmt, ap); + GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT + std::vector buf(size + 1); + int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); + GGML_ASSERT(size2 == size); + va_end(ap2); + va_end(ap); + return std::string(buf.data(), buf.size()); +} + +// +// key constants +// + +#define KEY_FTYPE "general.file_type" +#define KEY_NAME "general.name" +#define KEY_DESCRIPTION "general.description" +#define KEY_HAS_TEXT_ENC "clip.has_text_encoder" +#define KEY_HAS_VIS_ENC "clip.has_vision_encoder" +#define KEY_HAS_LLAVA_PROJ "clip.has_llava_projector" +#define KEY_HAS_MINICPMV_PROJ "clip.has_minicpmv_projector" +#define KEY_MINICPMV_VERSION "clip.minicpmv_version" +#define KEY_USE_GELU "clip.use_gelu" +#define KEY_N_EMBD "clip.%s.embedding_length" +#define KEY_N_FF "clip.%s.feed_forward_length" +#define KEY_N_BLOCK "clip.%s.block_count" +#define KEY_N_HEAD "clip.%s.attention.head_count" +#define KEY_LAYER_NORM_EPS "clip.%s.attention.layer_norm_epsilon" +#define KEY_PROJ_DIM "clip.%s.projection_dim" +#define KEY_TOKENS "tokenizer.ggml.tokens" +#define KEY_N_POSITIONS "clip.text.context_length" +#define KEY_IMAGE_SIZE "clip.vision.image_size" +#define KEY_PATCH_SIZE "clip.vision.patch_size" +#define KEY_IMAGE_MEAN "clip.vision.image_mean" +#define KEY_IMAGE_STD "clip.vision.image_std" +#define KEY_PROJ_TYPE "clip.projector_type" + +#define KEY_MM_PATCH_MERGE_TYPE "clip.vision.mm_patch_merge_type" +#define KEY_IMAGE_GRID_PINPOINTS "clip.vision.image_grid_pinpoints" +#define KEY_IMAGE_CROP_RESOLUTION "clip.vision.image_crop_resolution" + + +// +// tensor name constants +// + +#define TN_TOKEN_EMBD "%s.token_embd.weight" +#define TN_POS_EMBD "%s.position_embd.weight" +#define TN_CLASS_EMBD "v.class_embd" +#define TN_PATCH_EMBD "v.patch_embd.weight" +#define TN_PATCH_BIAS "v.patch_embd.bias" +#define TN_ATTN_K "%s.blk.%d.attn_k.%s" +#define TN_ATTN_Q "%s.blk.%d.attn_q.%s" +#define TN_ATTN_V "%s.blk.%d.attn_v.%s" +#define TN_ATTN_OUTPUT "%s.blk.%d.attn_out.%s" +#define TN_FFN_DOWN "%s.blk.%d.ffn_down.%s" +#define TN_FFN_UP "%s.blk.%d.ffn_up.%s" +#define TN_LN_1 "%s.blk.%d.ln1.%s" +#define TN_LN_2 "%s.blk.%d.ln2.%s" +#define TN_LN_PRE "%s.pre_ln.%s" +#define TN_LN_POST "%s.post_ln.%s" +#define TN_TEXT_PROJ "text_projection.weight" +#define TN_VIS_PROJ "visual_projection.weight" +#define TN_LLAVA_PROJ "mm.%d.%s" +#define TN_MVLM_PROJ_MLP "mm.model.mlp.%d.%s" +#define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s" +#define TN_MVLM_PROJ_PEG "mm.model.peg.%d.%s" +#define TN_IMAGE_NEWLINE "model.image_newline" + +#define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k" +#define TN_MINICPMV_QUERY "resampler.query" +#define TN_MINICPMV_PROJ "resampler.proj.weight" +#define TN_MINICPMV_KV_PROJ "resampler.kv.weight" +#define TN_MINICPMV_ATTN "resampler.attn.%s.%s" +#define TN_MINICPMV_LN "resampler.ln_%s.%s" + + +enum projector_type { + PROJECTOR_TYPE_MLP, + PROJECTOR_TYPE_MLP_NORM, + PROJECTOR_TYPE_LDP, + PROJECTOR_TYPE_LDPV2, + PROJECTOR_TYPE_RESAMPLER, + PROJECTOR_TYPE_UNKNOWN, +}; + +static std::map PROJECTOR_TYPE_NAMES = { + { PROJECTOR_TYPE_MLP, "mlp" }, + { PROJECTOR_TYPE_LDP, "ldp" }, + { PROJECTOR_TYPE_LDPV2, "ldpv2"}, + { PROJECTOR_TYPE_RESAMPLER, "resampler"}, +}; + + +// +// utilities to get data from a gguf file +// + +static int get_key_idx(const gguf_context * ctx, const char * key) { + int i = gguf_find_key(ctx, key); + if (i == -1) { + LOG_TEE("key %s not found in file\n", 
key); + throw std::runtime_error(format("Missing required key: %s", key)); + } + + return i; +} + +static uint32_t get_u32(const gguf_context * ctx, const std::string & key) { + const int i = get_key_idx(ctx, key.c_str()); + + return gguf_get_val_u32(ctx, i); +} + +static float get_f32(const gguf_context * ctx, const std::string & key) { + const int i = get_key_idx(ctx, key.c_str()); + + return gguf_get_val_f32(ctx, i); +} + +static struct ggml_tensor * get_tensor(struct ggml_context * ctx, const std::string & name) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, name.c_str()); + if (!cur) { + throw std::runtime_error(format("%s: unable to find tensor %s\n", __func__, name.c_str())); + } + + return cur; +} + +static std::string get_ftype(int ftype) { + return ggml_type_name(static_cast(ftype)); +} + +static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) { + switch (type) { + case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]); + case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]); + case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]); + case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]); + case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]); + case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]); + case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]); + case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]); + case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]); + case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]); + case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false"; + default: return format("unknown type %d", type); + } +} + +static void replace_all(std::string & s, const std::string & search, const std::string & replace) { + if (search.empty()) { + return; // Avoid infinite loop if 'search' is an empty string + } + size_t pos = 0; + while ((pos = s.find(search, pos)) != std::string::npos) { + s.replace(pos, search.length(), replace); + pos += replace.length(); + } +} + +static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) { + const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); + + switch (type) { + case GGUF_TYPE_STRING: + return gguf_get_val_str(ctx_gguf, i); + case GGUF_TYPE_ARRAY: + { + const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i); + int arr_n = gguf_get_arr_n(ctx_gguf, i); + const void * data = gguf_get_arr_data(ctx_gguf, i); + std::stringstream ss; + ss << "["; + for (int j = 0; j < arr_n; j++) { + if (arr_type == GGUF_TYPE_STRING) { + std::string val = gguf_get_arr_str(ctx_gguf, i, j); + // escape quotes + replace_all(val, "\\", "\\\\"); + replace_all(val, "\"", "\\\""); + ss << '"' << val << '"'; + } else if (arr_type == GGUF_TYPE_ARRAY) { + ss << "???"; + } else { + ss << gguf_data_to_str(arr_type, data, j); + } + if (j < arr_n - 1) { + ss << ", "; + } + } + ss << "]"; + return ss.str(); + } + default: + return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0); + } +} + +static void print_tensor_info(const ggml_tensor * tensor, const char * prefix = "") { + size_t tensor_size = ggml_nbytes(tensor); + LOG_TEE("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n", + prefix, ggml_n_dims(tensor), tensor->name, tensor_size, + tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], 
ggml_type_name(tensor->type)); +} + +static projector_type clip_projector_type_from_string(const std::string & name) { + for (const auto & kv : PROJECTOR_TYPE_NAMES) { // NOLINT + if (kv.second == name) { + return kv.first; + } + } + return PROJECTOR_TYPE_UNKNOWN; +} + +#ifdef CLIP_DEBUG_FUNCTIONS +static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) { + std::ofstream file(filename, std::ios::binary); + if (!file.is_open()) { + LOG_TEE("Failed to open file for writing: %s\n", filename.c_str()); + return; + } + + // PPM header: P6 format, width, height, and max color value + file << "P6\n" << img.nx << " " << img.ny << "\n255\n"; + + // Write pixel data + for (size_t i = 0; i < img.buf.size(); i += 3) { + // PPM expects binary data in RGB format, which matches our image buffer + file.write(reinterpret_cast(&img.buf[i]), 3); + } + + file.close(); +} + +static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) { + std::ofstream file(filename, std::ios::binary); + if (!file.is_open()) { + LOG_TEE("Failed to open file for writing: %s\n", filename.c_str()); + return; + } + + int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data + int bytesPerPixel = 3; + int widthInBytes = img.nx * bytesPerPixel; + int paddingAmount = (4 - (widthInBytes % 4)) % 4; + int stride = widthInBytes + paddingAmount; + + // Bitmap file header + unsigned char fileHeader[14] = { + 'B','M', // Signature + 0,0,0,0, // Image file size in bytes + 0,0,0,0, // Reserved + 54,0,0,0 // Start of pixel array + }; + + // Total file size + fileSize = 54 + (stride * img.ny); + fileHeader[2] = (unsigned char)(fileSize); + fileHeader[3] = (unsigned char)(fileSize >> 8); + fileHeader[4] = (unsigned char)(fileSize >> 16); + fileHeader[5] = (unsigned char)(fileSize >> 24); + + // Bitmap information header (BITMAPINFOHEADER) + unsigned char infoHeader[40] = { + 40,0,0,0, // Size of this header (40 bytes) + 0,0,0,0, // Image width + 0,0,0,0, // Image height + 1,0, // Number of color planes + 24,0, // Bits per pixel + 0,0,0,0, // No compression + 0,0,0,0, // Image size (can be 0 for no compression) + 0,0,0,0, // X pixels per meter (not specified) + 0,0,0,0, // Y pixels per meter (not specified) + 0,0,0,0, // Total colors (color table not used) + 0,0,0,0 // Important colors (all are important) + }; + + // Width and height in the information header + infoHeader[4] = (unsigned char)(img.nx); + infoHeader[5] = (unsigned char)(img.nx >> 8); + infoHeader[6] = (unsigned char)(img.nx >> 16); + infoHeader[7] = (unsigned char)(img.nx >> 24); + infoHeader[8] = (unsigned char)(img.ny); + infoHeader[9] = (unsigned char)(img.ny >> 8); + infoHeader[10] = (unsigned char)(img.ny >> 16); + infoHeader[11] = (unsigned char)(img.ny >> 24); + + // Write file headers + file.write(reinterpret_cast(fileHeader), sizeof(fileHeader)); + file.write(reinterpret_cast(infoHeader), sizeof(infoHeader)); + + // Pixel data + std::vector padding(3, 0); // Max padding size to be added to each row + for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top + for (int x = 0; x < img.nx; ++x) { + // Each pixel + size_t pixelIndex = (y * img.nx + x) * 3; + unsigned char pixel[3] = { + img.buf[pixelIndex + 2], // BMP stores pixels in BGR format + img.buf[pixelIndex + 1], + img.buf[pixelIndex] + }; + file.write(reinterpret_cast(pixel), 3); + } + // Write padding for the row + file.write(reinterpret_cast(padding.data()), paddingAmount); + } + + file.close(); +} + 
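+// Usage sketch: one way the two debug writers above can be combined to dump the same
+// image in both formats while inspecting preprocessing; the helper name and the
+// file-stem argument are illustrative, not part of the public clip API.
+static void clip_image_debug_dump_u8(const clip_image_u8 & img, const std::string & stem) {
+    clip_image_write_image_to_ppm(img, stem + ".ppm"); // plain PPM, easy to diff byte-wise
+    clip_image_save_to_bmp(img, stem + ".bmp");        // BMP for quick visual inspection
+}
+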
+// debug function to convert f32 to u8 +static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) { + dst.nx = src.nx; + dst.ny = src.ny; + dst.buf.resize(3 * src.nx * src.ny); + for (size_t i = 0; i < src.buf.size(); ++i) { + dst.buf[i] = static_cast(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255)); + } +} +#endif + + +// +// clip layers +// + +struct clip_hparams { + int32_t image_size; + int32_t patch_size; + int32_t hidden_size; + int32_t n_intermediate; + int32_t projection_dim; + int32_t n_head; + int32_t n_layer; + + float eps; + + char mm_patch_merge_type[32] = "flat"; // spatial_unpad or flat (default) + + int32_t image_grid_pinpoints[32]; + int32_t image_crop_resolution; +}; + +struct clip_layer { + // attention + struct ggml_tensor * k_w; + struct ggml_tensor * k_b; + struct ggml_tensor * q_w; + struct ggml_tensor * q_b; + struct ggml_tensor * v_w; + struct ggml_tensor * v_b; + + struct ggml_tensor * o_w; + struct ggml_tensor * o_b; + + // layernorm 1 + struct ggml_tensor * ln_1_w; + struct ggml_tensor * ln_1_b; + + // ff + struct ggml_tensor * ff_i_w; + struct ggml_tensor * ff_i_b; + + struct ggml_tensor * ff_o_w; + struct ggml_tensor * ff_o_b; + + // layernorm 2 + struct ggml_tensor * ln_2_w; + struct ggml_tensor * ln_2_b; +}; + +struct clip_vision_model { + struct clip_hparams hparams; + + // embeddings + struct ggml_tensor * class_embedding; + struct ggml_tensor * patch_embeddings; + struct ggml_tensor * patch_bias; + struct ggml_tensor * position_embeddings; + + struct ggml_tensor * pre_ln_w; + struct ggml_tensor * pre_ln_b; + + std::vector layers; + + struct ggml_tensor * post_ln_w; + struct ggml_tensor * post_ln_b; + + struct ggml_tensor * projection; + + // LLaVA projection + struct ggml_tensor * mm_0_w = NULL; + struct ggml_tensor * mm_0_b = NULL; + struct ggml_tensor * mm_2_w = NULL; + struct ggml_tensor * mm_2_b = NULL; + + struct ggml_tensor * image_newline = NULL; + + // Yi type models with mlp+normalization projection + struct ggml_tensor * mm_1_w = NULL; // Yi type models have 0, 1, 3, 4 + struct ggml_tensor * mm_1_b = NULL; + struct ggml_tensor * mm_3_w = NULL; + struct ggml_tensor * mm_3_b = NULL; + struct ggml_tensor * mm_4_w = NULL; + struct ggml_tensor * mm_4_b = NULL; + + // MobileVLM projection + struct ggml_tensor * mm_model_mlp_1_w; + struct ggml_tensor * mm_model_mlp_1_b; + struct ggml_tensor * mm_model_mlp_3_w; + struct ggml_tensor * mm_model_mlp_3_b; + struct ggml_tensor * mm_model_block_1_block_0_0_w; + struct ggml_tensor * mm_model_block_1_block_0_1_w; + struct ggml_tensor * mm_model_block_1_block_0_1_b; + struct ggml_tensor * mm_model_block_1_block_1_fc1_w; + struct ggml_tensor * mm_model_block_1_block_1_fc1_b; + struct ggml_tensor * mm_model_block_1_block_1_fc2_w; + struct ggml_tensor * mm_model_block_1_block_1_fc2_b; + struct ggml_tensor * mm_model_block_1_block_2_0_w; + struct ggml_tensor * mm_model_block_1_block_2_1_w; + struct ggml_tensor * mm_model_block_1_block_2_1_b; + struct ggml_tensor * mm_model_block_2_block_0_0_w; + struct ggml_tensor * mm_model_block_2_block_0_1_w; + struct ggml_tensor * mm_model_block_2_block_0_1_b; + struct ggml_tensor * mm_model_block_2_block_1_fc1_w; + struct ggml_tensor * mm_model_block_2_block_1_fc1_b; + struct ggml_tensor * mm_model_block_2_block_1_fc2_w; + struct ggml_tensor * mm_model_block_2_block_1_fc2_b; + struct ggml_tensor * mm_model_block_2_block_2_0_w; + struct ggml_tensor * mm_model_block_2_block_2_1_w; + struct ggml_tensor * mm_model_block_2_block_2_1_b; + + // 
MobileVLM_V2 projection + struct ggml_tensor * mm_model_mlp_0_w; + struct ggml_tensor * mm_model_mlp_0_b; + struct ggml_tensor * mm_model_mlp_2_w; + struct ggml_tensor * mm_model_mlp_2_b; + struct ggml_tensor * mm_model_peg_0_w; + struct ggml_tensor * mm_model_peg_0_b; + + // MINICPMV projection + struct ggml_tensor * mm_model_pos_embed_k; + struct ggml_tensor * mm_model_query; + struct ggml_tensor * mm_model_proj; + struct ggml_tensor * mm_model_kv_proj; + struct ggml_tensor * mm_model_attn_q_w; + struct ggml_tensor * mm_model_attn_q_b; + struct ggml_tensor * mm_model_attn_k_w; + struct ggml_tensor * mm_model_attn_k_b; + struct ggml_tensor * mm_model_attn_v_w; + struct ggml_tensor * mm_model_attn_v_b; + struct ggml_tensor * mm_model_attn_o_w; + struct ggml_tensor * mm_model_attn_o_b; + struct ggml_tensor * mm_model_ln_q_w; + struct ggml_tensor * mm_model_ln_q_b; + struct ggml_tensor * mm_model_ln_kv_w; + struct ggml_tensor * mm_model_ln_kv_b; + struct ggml_tensor * mm_model_ln_post_w; + struct ggml_tensor * mm_model_ln_post_b; +}; + +struct clip_ctx { + bool has_text_encoder = false; + bool has_vision_encoder = false; + bool has_llava_projector = false; + bool has_minicpmv_projector = false; + int minicpmv_version = 2; + + struct clip_vision_model vision_model; + projector_type proj_type = PROJECTOR_TYPE_MLP; + + float image_mean[3]; + float image_std[3]; + bool use_gelu = false; + int32_t ftype = 1; + + bool has_class_embedding = true; + bool has_pre_norm = true; + bool has_post_norm = false; + bool has_patch_bias = false; + + struct gguf_context * ctx_gguf; + struct ggml_context * ctx_data; + + std::vector buf_compute_meta; + + // memory buffers to evaluate the model + ggml_backend_buffer_t params_buffer = NULL; + + ggml_backend_t backend = NULL; + ggml_gallocr_t compute_alloc = NULL; + + struct clip_image_size * load_image_size; +}; + +static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct clip_image_size * load_image_size, bool is_inf = false) { + if (!ctx->has_vision_encoder) { + LOG_TEE("This gguf file seems to have no vision encoder\n"); + return nullptr; + } + + const auto & model = ctx->vision_model; + const auto & hparams = model.hparams; + + const int image_size = hparams.image_size; + int image_size_width = image_size; + int image_size_height = image_size; + if (ctx->has_minicpmv_projector) { + if (load_image_size == nullptr) { + load_image_size = clip_image_size_init(); + } + LOG_TEE("%s: %d %d\n", __func__, load_image_size->width, load_image_size->height); + image_size_width = load_image_size->width; + image_size_height = load_image_size->height; + if (is_inf) { + image_size_width = imgs->data->nx; + image_size_height = imgs->data->ny; + } + } + const int patch_size = hparams.patch_size; + const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size)); + const int num_positions = num_patches + (ctx->has_class_embedding ? 
1 : 0); + const int hidden_size = hparams.hidden_size; + const int n_head = hparams.n_head; + const int d_head = hidden_size / n_head; + int n_layer = hparams.n_layer; + const float eps = hparams.eps; + + const int batch_size = imgs->size; + + if (ctx->has_llava_projector || ctx->has_minicpmv_projector) { + GGML_ASSERT(batch_size == 1); + } + + struct ggml_init_params params = { + /*.mem_size =*/ ctx->buf_compute_meta.size(), + /*.mem_buffer =*/ ctx->buf_compute_meta.data(), + /*.no_alloc =*/ true, + }; + + struct ggml_context * ctx0 = ggml_init(params); + struct ggml_cgraph * gf = ggml_new_graph(ctx0); + + struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3, batch_size); + ggml_set_name(inp_raw, "inp_raw"); + ggml_set_input(inp_raw); + + struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1); + + inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size); + inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3)); + + if (ctx->has_patch_bias) { + // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp)); + inp = ggml_add(ctx0, inp, model.patch_bias); + } + struct ggml_tensor * embeddings = inp; + struct ggml_tensor * pos_embed = nullptr; + + if (ctx->has_llava_projector) { + // concat class_embeddings and patch_embeddings + if (ctx->has_class_embedding) { + embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size); + ggml_set_name(embeddings, "embeddings"); + ggml_set_input(embeddings); + embeddings = ggml_acc(ctx0, embeddings, model.class_embedding, + embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0); + embeddings = ggml_acc(ctx0, embeddings, inp, + embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]); + } + } + + struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions); + ggml_set_name(positions, "positions"); + ggml_set_input(positions); + + embeddings = + ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions)); + + if (ctx->has_minicpmv_projector) { + int pos_w = image_size_width/patch_size; + int pos_h = image_size_height/patch_size; + if (ctx->minicpmv_version == 2) { + pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 4096, pos_w * pos_h, 1); + } + else if (ctx->minicpmv_version == 3) { + pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 3584, pos_w * pos_h, 1); + } + ggml_set_name(pos_embed, "pos_embed"); + ggml_set_input(pos_embed); + } + + // pre-layernorm + if (ctx->has_pre_norm) { + embeddings = ggml_norm(ctx0, embeddings, eps); + ggml_set_name(embeddings, "pre_ln"); + + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b); + } + + // loop over layers + if (ctx->has_minicpmv_projector) { + n_layer += 1; + } + for (int il = 0; il < n_layer - 1; il++) { + struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states + + //const size_t nb_q_w = model.layers[il].q_w->nb[0]; + + // layernorm1 + { + cur = ggml_norm(ctx0, cur, eps); + + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w), + model.layers[il].ln_1_b); + } + + // self-attention + { + + struct ggml_tensor * Q = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b); + + Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head)); + Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_positions, batch_size); + Q = ggml_cont(ctx0, 
ggml_permute(ctx0, Q, 0, 2, 1, 3)); + Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size); + + struct ggml_tensor * K = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b); + + K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size); + K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3)); + K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size); + + struct ggml_tensor * V = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b); + + V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size); + V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3)); + V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size); + + struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + KQ = ggml_soft_max_inplace(ctx0, KQ); + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ); + KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size); + KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + + cur = ggml_cont_3d(ctx0, KQV, hidden_size, num_positions, batch_size); + } + + // attention output + cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b); + + // re-add the layer input, e.g., residual + cur = ggml_add(ctx0, cur, embeddings); + + embeddings = cur; // embeddings = residual, cur = hidden_states + + // layernorm2 + { + cur = ggml_norm(ctx0, cur, eps); + + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b); + } + + cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur); + cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b); + + if (ctx->use_gelu) { + cur = ggml_gelu_inplace(ctx0, cur); + } else { + cur = ggml_gelu_quick_inplace(ctx0, cur); + } + + cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur); + cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b); + + // residual 2 + cur = ggml_add(ctx0, embeddings, cur); + + embeddings = cur; + } + + // post-layernorm + if (ctx->has_post_norm) { + embeddings = ggml_norm(ctx0, embeddings, eps); + ggml_set_name(embeddings, "post_ln"); + + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b); + } + + // llava projector + if (ctx->has_llava_projector) { + embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]); + + struct ggml_tensor * patches = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_patches); + ggml_set_name(patches, "patches"); + ggml_set_input(patches); + + // shape [1, 576, 1024] + // ne is whcn, ne = [1024, 576, 1, 1] + embeddings = ggml_get_rows(ctx0, embeddings, patches); + + // print_tensor_info(embeddings, "embeddings"); + + // llava projector + if (ctx->proj_type == PROJECTOR_TYPE_MLP) { + embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_0_b); + + embeddings = ggml_gelu(ctx0, embeddings); + embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_2_b); + } + else if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) { + embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_0_b); + // ggml_tensor_printf(embeddings, "mm_0_w",0,true,false); + // First LayerNorm + embeddings = ggml_norm(ctx0, embeddings, eps); + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_1_w), + model.mm_1_b); + + // GELU activation + embeddings = ggml_gelu(ctx0, embeddings); + + // Second linear layer + embeddings 
= ggml_mul_mat(ctx0, model.mm_3_w, embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_3_b); + + // Second LayerNorm + embeddings = ggml_norm(ctx0, embeddings, eps); + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_4_w), + model.mm_4_b); + } + else if (ctx->proj_type == PROJECTOR_TYPE_LDP) { + // MobileVLM projector + int n_patch = 24; + struct ggml_tensor * mlp_1 = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, embeddings); + mlp_1 = ggml_add(ctx0, mlp_1, model.mm_model_mlp_1_b); + mlp_1 = ggml_gelu(ctx0, mlp_1); + struct ggml_tensor * mlp_3 = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, mlp_1); + mlp_3 = ggml_add(ctx0, mlp_3, model.mm_model_mlp_3_b); + // mlp_3 shape = [1, 576, 2048], ne = [2048, 576, 1, 1] + + // block 1 + struct ggml_tensor * block_1 = nullptr; + { + // transpose from [1, 576, 2048] --> [1, 2048, 576] --> [1, 2048, 24, 24] + mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3)); + mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]); + // stride = 1, padding = 1, bias is nullptr + block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1); + + // layer norm + // // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1] + block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3)); + // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1] + block_1 = ggml_norm(ctx0, block_1, eps); + block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_0_1_w), model.mm_model_block_1_block_0_1_b); + block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3)); + + // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1] + // hardswish + struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1); + + block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0); + // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1] + // pointwise conv + block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]); + block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc1_w, block_1); + block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc1_b); + block_1 = ggml_relu(ctx0, block_1); + block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_1_fc2_w, block_1); + block_1 = ggml_add(ctx0, block_1, model.mm_model_block_1_block_1_fc2_b); + block_1 = ggml_hardsigmoid(ctx0, block_1); + // block_1_hw shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1], block_1 shape = [1, 2048], ne = [2048, 1, 1, 1] + block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]); + block_1 = ggml_mul(ctx0, block_1_hw, block_1); + + int w = block_1->ne[0], h = block_1->ne[1]; + block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]); + block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3)); + + // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1] + block_1 = ggml_mul_mat(ctx0, model.mm_model_block_1_block_2_0_w, block_1); + block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]); + + // block_1 shape = [1, 24, 24, 2048], ne = [2048, 24, 24, 1] + block_1 = ggml_norm(ctx0, block_1, eps); + block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_1_block_2_1_w), model.mm_model_block_1_block_2_1_b); + block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3)); + // block1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1] + // residual + 
block_1 = ggml_add(ctx0, mlp_3, block_1); + } + + // block_2 + { + // stride = 2 + block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1); + + // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1] + // layer norm + block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 2, 0, 3)); + // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1] + block_1 = ggml_norm(ctx0, block_1, eps); + block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_0_1_w), model.mm_model_block_2_block_0_1_b); + block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 2, 0, 1, 3)); + // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1] + // hardswish + struct ggml_tensor * block_1_hw = ggml_hardswish(ctx0, block_1); + + // not sure the parameters is right for globalAvgPooling + block_1 = ggml_pool_2d(ctx0, block_1_hw, GGML_OP_POOL_AVG, block_1_hw->ne[0], block_1_hw->ne[1], block_1_hw->ne[0], block_1_hw->ne[1], 0, 0); + // block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1] + // pointwise conv + block_1 = ggml_reshape_2d(ctx0, block_1, block_1->ne[0]*block_1->ne[1]*block_1->ne[2], block_1->ne[3]); + block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc1_w, block_1); + block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc1_b); + block_1 = ggml_relu(ctx0, block_1); + block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_1_fc2_w, block_1); + block_1 = ggml_add(ctx0, block_1, model.mm_model_block_2_block_1_fc2_b); + block_1 = ggml_hardsigmoid(ctx0, block_1); + + // block_1_hw shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1], block_1 shape = [1, 2048, 1, 1], ne = [1, 1, 2048, 1] + block_1 = ggml_reshape_4d(ctx0, block_1, 1, 1, block_1->ne[0], block_1->ne[1]); + block_1 = ggml_mul(ctx0, block_1_hw, block_1); + + int w = block_1->ne[0], h = block_1->ne[1]; + block_1 = ggml_reshape_3d(ctx0, block_1, w*h, block_1->ne[2], block_1->ne[3]); + block_1 = ggml_cont(ctx0, ggml_permute(ctx0, block_1, 1, 0, 2, 3)); + // block_1 shape = [1, 24*24, 2048], ne = [24*24, 2048, 1] + block_1 = ggml_mul_mat(ctx0, model.mm_model_block_2_block_2_0_w, block_1); + block_1 = ggml_reshape_4d(ctx0, block_1, block_1->ne[0], w, h, block_1->ne[3]); + + + // block_1 shape = [1, 12, 12, 2048], ne = [2048, 12, 12, 1] + block_1 = ggml_norm(ctx0, block_1, eps); + block_1 = ggml_add(ctx0, ggml_mul(ctx0, block_1, model.mm_model_block_2_block_2_1_w), model.mm_model_block_2_block_2_1_b); + block_1 = ggml_reshape_3d(ctx0, block_1, block_1->ne[0], block_1->ne[1] * block_1->ne[2], block_1->ne[3]); + // block_1 shape = [1, 144, 2048], ne = [2048, 144, 1] + } + embeddings = block_1; + } + else if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) + { + int n_patch = 24; + struct ggml_tensor * mlp_0 = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings); + mlp_0 = ggml_add(ctx0, mlp_0, model.mm_model_mlp_0_b); + mlp_0 = ggml_gelu(ctx0, mlp_0); + struct ggml_tensor * mlp_2 = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, mlp_0); + mlp_2 = ggml_add(ctx0, mlp_2, model.mm_model_mlp_2_b); + // mlp_2 ne = [2048, 576, 1, 1] + // // AVG Pool Layer 2*2, strides = 2 + mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 0, 2, 3)); + // mlp_2 ne = [576, 2048, 1, 1] + mlp_2 = ggml_reshape_4d(ctx0, mlp_2, n_patch, n_patch, mlp_2->ne[1], mlp_2->ne[2]); + // mlp_2 ne [24, 24, 2048, 1] + mlp_2 = ggml_pool_2d(ctx0, mlp_2, GGML_OP_POOL_AVG, 2, 2, 2, 2, 0, 0); + // weight ne = [3, 3, 2048, 1] + struct ggml_tensor * peg_0 = ggml_conv_depthwise_2d(ctx0, model.mm_model_peg_0_w, mlp_2, 1, 
1, 1, 1, 1, 1); + peg_0 = ggml_cont(ctx0, ggml_permute(ctx0, peg_0, 1, 2, 0, 3)); + peg_0 = ggml_add(ctx0, peg_0, model.mm_model_peg_0_b); + mlp_2 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_2, 1, 2, 0, 3)); + peg_0 = ggml_add(ctx0, peg_0, mlp_2); + peg_0 = ggml_reshape_3d(ctx0, peg_0, peg_0->ne[0], peg_0->ne[1] * peg_0->ne[2], peg_0->ne[3]); + embeddings = peg_0; + } + else { + GGML_ABORT("fatal error"); + } + } + // minicpmv projector + else if (ctx->has_minicpmv_projector) + { + if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) { + struct ggml_tensor * q = model.mm_model_query; + { // layernorm + q = ggml_norm(ctx0, q, eps); + q = ggml_add(ctx0, ggml_mul(ctx0, q, model.mm_model_ln_q_w), model.mm_model_ln_q_b); + } + struct ggml_tensor * v = ggml_mul_mat(ctx0, model.mm_model_kv_proj, embeddings); + { // layernorm + v = ggml_norm(ctx0, v, eps); + v = ggml_add(ctx0, ggml_mul(ctx0, v, model.mm_model_ln_kv_w), model.mm_model_ln_kv_b); + } + struct ggml_tensor * k; + { // position + // q = ggml_add(ctx0, q, model.mm_model_pos_embed); + k = ggml_add(ctx0, v, pos_embed); + } + + { // attention + int hidden_size = 4096; + const int d_head = 128; + int n_head = hidden_size/d_head; + int num_query = 96; + if (ctx->minicpmv_version == 2) { + hidden_size = 4096; + n_head = hidden_size/d_head; + num_query = 96; + } + else if (ctx->minicpmv_version == 3) { + hidden_size = 3584; + n_head = hidden_size/d_head; + num_query = 64; + } + + struct ggml_tensor * Q = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_q_w, q), model.mm_model_attn_q_b); + Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head)); + struct ggml_tensor * K = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_k_w, k), model.mm_model_attn_k_b); + struct ggml_tensor * V = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_v_w, v), model.mm_model_attn_v_b); + // permute + Q = ggml_reshape_4d(ctx0, Q, d_head, n_head, num_query, batch_size); + Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3)); + Q = ggml_reshape_3d(ctx0, Q, d_head, num_query, n_head * batch_size); + K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size); + K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3)); + K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size); + V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size); + V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3)); + V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size); + struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + KQ = ggml_soft_max_inplace(ctx0, KQ); + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ); + KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_query, n_head, batch_size); + KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + KQV = ggml_cont_3d(ctx0, KQV, hidden_size, num_query, batch_size); + + embeddings = ggml_add(ctx0, ggml_mul_mat(ctx0, model.mm_model_attn_o_w, KQV), model.mm_model_attn_o_b); + } + { // layernorm + embeddings = ggml_norm(ctx0, embeddings, eps); + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_post_w), model.mm_model_ln_post_b); + } + embeddings = ggml_mul_mat(ctx0, model.mm_model_proj, embeddings); + } + else { + GGML_ASSERT(false); + } + } + + // build the graph + ggml_build_forward_expand(gf, embeddings); + + ggml_free(ctx0); + + return gf; +} + +// read and create ggml_context containing the tensors and their data +struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { + struct ggml_context * meta = NULL; + + struct gguf_init_params 
params = { + /*.no_alloc = */ true, + /*.ctx = */ &meta, + }; + + struct gguf_context * ctx = gguf_init_from_file(fname, params); + if (!ctx) { + throw std::runtime_error(format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, fname)); + } + + if (verbosity >= 1) { + const int n_tensors = gguf_get_n_tensors(ctx); + const int n_kv = gguf_get_n_kv(ctx); + const int ftype = get_u32(ctx, KEY_FTYPE); + const std::string ftype_str = get_ftype(ftype); + const int idx_desc = get_key_idx(ctx, KEY_DESCRIPTION); + const std::string description = gguf_get_val_str(ctx, idx_desc); + const int idx_name = gguf_find_key(ctx, KEY_NAME); + if (idx_name != -1) { // make name optional temporarily as some of the uploaded models missing it due to a bug + const std::string name = gguf_get_val_str(ctx, idx_name); + LOG_TEE("%s: model name: %s\n", __func__, name.c_str()); + } + LOG_TEE("%s: description: %s\n", __func__, description.c_str()); + LOG_TEE("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx)); + LOG_TEE("%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx)); + LOG_TEE("%s: n_tensors: %d\n", __func__, n_tensors); + LOG_TEE("%s: n_kv: %d\n", __func__, n_kv); + LOG_TEE("%s: ftype: %s\n", __func__, ftype_str.c_str()); + LOG_TEE("\n"); + } + const int n_tensors = gguf_get_n_tensors(ctx); + + // kv + const int n_kv = gguf_get_n_kv(ctx); + LOG_TEE("%s: loaded meta data with %d key-value pairs and %d tensors from %s\n", + __func__, n_kv, n_tensors, fname); + { + std::map n_type; + + for (int i = 0; i < n_tensors; i++) { + enum ggml_type type = gguf_get_tensor_type(ctx, i); + + n_type[type]++; + } + + LOG_TEE("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__); + for (int i = 0; i < n_kv; i++) { + const char * name = gguf_get_key(ctx, i); + const enum gguf_type type = gguf_get_kv_type(ctx, i); + const std::string type_name = + type == GGUF_TYPE_ARRAY + ? 
format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx, i)), gguf_get_arr_n(ctx, i)) + : gguf_type_name(type); + + std::string value = gguf_kv_to_str(ctx, i); + const size_t MAX_VALUE_LEN = 40; + if (value.size() > MAX_VALUE_LEN) { + value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()); + } + replace_all(value, "\n", "\\n"); + + LOG_TEE("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str()); + } + + // print type counts + for (auto & kv : n_type) { + if (kv.second == 0) { + continue; + } + + LOG_TEE("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second); + } + } + + // data + size_t model_size = 0; + { + for (int i = 0; i < n_tensors; ++i) { + const char * name = gguf_get_tensor_name(ctx, i); + const size_t offset = gguf_get_tensor_offset(ctx, i); + enum ggml_type type = gguf_get_tensor_type(ctx, i); + struct ggml_tensor * cur = ggml_get_tensor(meta, name); + size_t tensor_size = ggml_nbytes(cur); + model_size += tensor_size; + if (verbosity >= 3) { + LOG_TEE("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n", + __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type)); + } + } + } + + clip_ctx * new_clip = new clip_ctx; + + // update projector type + { + int idx = gguf_find_key(ctx, KEY_PROJ_TYPE); + if (idx != -1) { + const std::string proj_type = gguf_get_val_str(ctx, idx); + new_clip->proj_type = clip_projector_type_from_string(proj_type); + } else { + new_clip->proj_type = PROJECTOR_TYPE_MLP; + } + + if (new_clip->proj_type == PROJECTOR_TYPE_MLP) { + if (gguf_find_tensor(ctx, format(TN_LLAVA_PROJ, 3, "weight").c_str()) != -1) { + new_clip->proj_type = PROJECTOR_TYPE_MLP_NORM; + } + } + } + +#ifdef GGML_USE_CUDA + new_clip->backend = ggml_backend_cuda_init(0); + LOG_TEE("%s: CLIP using CUDA backend\n", __func__); +#endif + +#ifdef GGML_USE_METAL + new_clip->backend = ggml_backend_metal_init(); + LOG_TEE("%s: CLIP using Metal backend\n", __func__); +#endif + +#ifdef GGML_USE_CANN + new_clip->backend = ggml_backend_cann_init(0); + LOG_TEE("%s: CLIP using CANN backend\n", __func__); +#endif + + + if (!new_clip->backend) { + new_clip->backend = ggml_backend_cpu_init(); + LOG_TEE("%s: CLIP using CPU backend\n", __func__); + } + + // model size and capabilities + { + int idx = get_key_idx(ctx, KEY_HAS_TEXT_ENC); + new_clip->has_text_encoder = gguf_get_val_bool(ctx, idx); + + idx = get_key_idx(ctx, KEY_HAS_VIS_ENC); + new_clip->has_vision_encoder = gguf_get_val_bool(ctx, idx); + + idx = gguf_find_key(ctx, KEY_HAS_LLAVA_PROJ); + if (idx != -1) { + new_clip->has_llava_projector = gguf_get_val_bool(ctx, idx); + } + + idx = gguf_find_key(ctx, KEY_HAS_MINICPMV_PROJ); + if (idx != -1) { + new_clip->has_minicpmv_projector = gguf_get_val_bool(ctx, idx); + } + + idx = gguf_find_key(ctx, KEY_MINICPMV_VERSION); + if (idx != -1) { + new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx); + } + + // GGML_ASSERT(new_clip->has_llava_projector); // see monatis/clip.cpp for image and/or text encoding for semantic search + + GGML_ASSERT(new_clip->has_vision_encoder); + GGML_ASSERT(!new_clip->has_text_encoder); + + idx = get_key_idx(ctx, KEY_USE_GELU); + new_clip->use_gelu = gguf_get_val_bool(ctx, idx); + + if (verbosity >= 1) { + LOG_TEE("%s: text_encoder: %d\n", __func__, new_clip->has_text_encoder); + LOG_TEE("%s: vision_encoder: %d\n", __func__, 
new_clip->has_vision_encoder); + LOG_TEE("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector); + LOG_TEE("%s: minicpmv_projector: %d\n", __func__, new_clip->has_minicpmv_projector); + LOG_TEE("%s: model size: %.2f MB\n", __func__, model_size / 1024.0 / 1024.0); + LOG_TEE("%s: metadata size: %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0); + } + } + + LOG_TEE("%s: params backend buffer size = % 6.2f MB (%i tensors)\n", __func__, model_size / (1024.0 * 1024.0), n_tensors); + + // load tensors + { + std::vector read_buf; + struct ggml_init_params params = { + /*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + + new_clip->ctx_data = ggml_init(params); + if (!new_clip->ctx_data) { + LOG_TEE("%s: ggml_init() failed\n", __func__); + clip_free(new_clip); + gguf_free(ctx); + return nullptr; + } + + auto fin = std::ifstream(fname, std::ios::binary); + if (!fin) { + LOG_TEE("cannot open model file for loading tensors\n"); + clip_free(new_clip); + gguf_free(ctx); + return nullptr; + } + + // add tensors to context + for (int i = 0; i < n_tensors; ++i) { + const char * name = gguf_get_tensor_name(ctx, i); + struct ggml_tensor * t = ggml_get_tensor(meta, name); + struct ggml_tensor * cur = ggml_dup_tensor(new_clip->ctx_data, t); + ggml_set_name(cur, name); + } + + // alloc memory and offload data + new_clip->params_buffer = ggml_backend_alloc_ctx_tensors(new_clip->ctx_data, new_clip->backend); + for (int i = 0; i < n_tensors; ++i) { + const char * name = gguf_get_tensor_name(ctx, i); + struct ggml_tensor * cur = ggml_get_tensor(new_clip->ctx_data, name); + const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i); + fin.seekg(offset, std::ios::beg); + if (!fin) { + LOG_TEE("%s: failed to seek for tensor %s\n", __func__, name); + clip_free(new_clip); + gguf_free(ctx); + return nullptr; + } + int num_bytes = ggml_nbytes(cur); + if (ggml_backend_buffer_is_host(new_clip->params_buffer)) { + // for the CPU and Metal backend, we can read directly into the tensor + fin.read(reinterpret_cast(cur->data), num_bytes); + } else { + // read into a temporary buffer first, then copy to device memory + read_buf.resize(num_bytes); + fin.read(reinterpret_cast(read_buf.data()), num_bytes); + ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes); + } + } + fin.close(); + } + + // vision model + if (new_clip->has_vision_encoder) { + // load vision model + auto & vision_model = new_clip->vision_model; + auto & hparams = vision_model.hparams; + hparams.hidden_size = get_u32(ctx, format(KEY_N_EMBD, "vision")); + hparams.n_head = get_u32(ctx, format(KEY_N_HEAD, "vision")); + hparams.n_intermediate = get_u32(ctx, format(KEY_N_FF, "vision")); + hparams.n_layer = get_u32(ctx, format(KEY_N_BLOCK, "vision")); + hparams.image_size = get_u32(ctx, KEY_IMAGE_SIZE); + hparams.patch_size = get_u32(ctx, KEY_PATCH_SIZE); + hparams.projection_dim = get_u32(ctx, format(KEY_PROJ_DIM, "vision")); + hparams.eps = get_f32(ctx, format(KEY_LAYER_NORM_EPS, "vision")); + + try { + int idx = get_key_idx(ctx, KEY_IMAGE_GRID_PINPOINTS); + int n = gguf_get_arr_n(ctx, idx); + const int32_t * pinpoints = (const int32_t *)gguf_get_arr_data(ctx, idx); + for (int i = 0; i < 32 && i < n && pinpoints[i] != 0; ++i) { + hparams.image_grid_pinpoints[i] = pinpoints[i]; + } + if (n < 32) + hparams.image_grid_pinpoints[n] = 0; + } catch (std::runtime_error & /*e*/) { + hparams.image_grid_pinpoints[0]=0; + } + + try { + int idx = 
get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE); + strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx)); + } catch (std::runtime_error & /*e*/) { + strcpy(hparams.mm_patch_merge_type, "flat"); + } + + try { + hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6 + } catch(const std::exception& /*e*/) { + hparams.image_crop_resolution = hparams.image_size; + } + + int idx_mean = get_key_idx(ctx, KEY_IMAGE_MEAN); + int idx_std = get_key_idx(ctx, KEY_IMAGE_STD); + + const float * mean_data = (const float *)gguf_get_arr_data(ctx, idx_mean); + const float * std_data = (const float *)gguf_get_arr_data(ctx, idx_std); + + for (int i = 0; i < 3; ++i) { + new_clip->image_mean[i] = mean_data[i]; + new_clip->image_std[i] = std_data[i]; + } + + if (verbosity >= 2) { + LOG_TEE("\n%s: vision model hparams\n", __func__); + LOG_TEE("image_size %d\n", hparams.image_size); + LOG_TEE("patch_size %d\n", hparams.patch_size); + LOG_TEE("v_hidden_size %d\n", hparams.hidden_size); + LOG_TEE("v_n_intermediate %d\n", hparams.n_intermediate); + LOG_TEE("v_projection_dim %d\n", hparams.projection_dim); + LOG_TEE("v_n_head %d\n", hparams.n_head); + LOG_TEE("v_n_layer %d\n", hparams.n_layer); + LOG_TEE("v_eps %f\n", hparams.eps); + LOG_TEE("v_image_mean %f %f %f\n", new_clip->image_mean[0], new_clip->image_mean[1], new_clip->image_mean[2]); + LOG_TEE("v_image_std %f %f %f\n", new_clip->image_std[0], new_clip->image_std[1], new_clip->image_std[2]); + LOG_TEE("v_image_grid_pinpoints: "); + for (int i = 0; i < 32 && (hparams.image_grid_pinpoints[i] != 0); ++i) { + LOG_TEE("%d ", hparams.image_grid_pinpoints[i]); + } + LOG_TEE("\n"); + LOG_TEE("v_mm_patch_merge_type: %s\n", hparams.mm_patch_merge_type); + + } + + try { + vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD); + new_clip->has_class_embedding = true; + } catch (const std::exception& /*e*/) { + new_clip->has_class_embedding = false; + } + + try { + vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight")); + vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias")); + new_clip->has_pre_norm = true; + } catch (std::exception & /*e*/) { + new_clip->has_pre_norm = false; + } + + try { + vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight")); + vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias")); + new_clip->has_post_norm = true; + } catch (std::exception & /*e*/) { + new_clip->has_post_norm = false; + } + + try { + vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS); + new_clip->has_patch_bias = true; + } catch (std::exception & /*e*/) { + new_clip->has_patch_bias = false; + } + + try { + vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD); + vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v")); + } catch(const std::exception& /*e*/) { + LOG_TEE("%s: failed to load vision model tensors\n", __func__); + } + + // LLaVA projection + if (new_clip->proj_type == PROJECTOR_TYPE_MLP || new_clip->proj_type == PROJECTOR_TYPE_MLP_NORM) { + vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight")); + vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias")); + try { + // Yi-type llava + vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "weight")); + vision_model.mm_1_b = get_tensor(new_clip->ctx_data, 
format(TN_LLAVA_PROJ, 1, "bias")); + } catch (std::runtime_error & /*e*/) { } + try { + // missing in Yi-type llava + vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight")); + vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias")); + } catch (std::runtime_error & /*e*/) { } + try { + // Yi-type llava + vision_model.mm_3_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "weight")); + vision_model.mm_3_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "bias")); + } catch (std::runtime_error & /*e*/) { } + try { + // Yi-type llava + vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight")); + vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias")); + } catch (std::runtime_error & /*e*/) { } + try { + vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE); + // LOG_TEE("%s: image_newline tensor (llava-1.6) found\n", __func__); + } catch (std::runtime_error & /*e*/) { } + } else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) { + // MobileVLM projection + vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight")); + vision_model.mm_model_mlp_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "bias")); + vision_model.mm_model_mlp_3_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "weight")); + vision_model.mm_model_mlp_3_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "bias")); + vision_model.mm_model_block_1_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight")); + vision_model.mm_model_block_1_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight")); + vision_model.mm_model_block_1_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias")); + vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight")); + vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias")); + vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight")); + vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias")); + vision_model.mm_model_block_1_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight")); + vision_model.mm_model_block_1_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight")); + vision_model.mm_model_block_1_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias")); + vision_model.mm_model_block_2_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight")); + vision_model.mm_model_block_2_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight")); + vision_model.mm_model_block_2_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias")); + vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight")); + vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias")); + vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight")); + 
vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias")); + vision_model.mm_model_block_2_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight")); + vision_model.mm_model_block_2_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight")); + vision_model.mm_model_block_2_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias")); + } + else if (new_clip->proj_type == PROJECTOR_TYPE_LDPV2) + { + // MobilVLM_V2 projection + vision_model.mm_model_mlp_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "weight")); + vision_model.mm_model_mlp_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "bias")); + vision_model.mm_model_mlp_2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "weight")); + vision_model.mm_model_mlp_2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "bias")); + vision_model.mm_model_peg_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "weight")); + vision_model.mm_model_peg_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "bias")); + } + else if (new_clip->proj_type == PROJECTOR_TYPE_RESAMPLER) { + // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD); + vision_model.mm_model_pos_embed_k = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD_K); + vision_model.mm_model_query = get_tensor(new_clip->ctx_data, TN_MINICPMV_QUERY); + vision_model.mm_model_proj = get_tensor(new_clip->ctx_data, TN_MINICPMV_PROJ); + vision_model.mm_model_kv_proj = get_tensor(new_clip->ctx_data, TN_MINICPMV_KV_PROJ); + vision_model.mm_model_attn_q_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "q", "weight")); + vision_model.mm_model_attn_k_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "k", "weight")); + vision_model.mm_model_attn_v_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "v", "weight")); + vision_model.mm_model_attn_q_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "q", "bias")); + vision_model.mm_model_attn_k_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "k", "bias")); + vision_model.mm_model_attn_v_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "v", "bias")); + vision_model.mm_model_attn_o_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "out", "weight")); + vision_model.mm_model_attn_o_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "out", "bias")); + vision_model.mm_model_ln_q_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "q", "weight")); + vision_model.mm_model_ln_q_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "q", "bias")); + vision_model.mm_model_ln_kv_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "kv", "weight")); + vision_model.mm_model_ln_kv_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "kv", "bias")); + vision_model.mm_model_ln_post_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "weight")); + vision_model.mm_model_ln_post_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "bias")); + } + else { + std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type]; + throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str())); + } + + vision_model.layers.resize(hparams.n_layer); + + for (int il = 0; il < hparams.n_layer; ++il) { + auto & layer = vision_model.layers[il]; + 
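+            // per-block encoder weights: attention q/k/v/output projections, the two
+            // layernorms, and the feed-forward down/up projections (weight and bias for each)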
layer.k_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "weight")); + layer.q_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "weight")); + layer.v_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "weight")); + layer.o_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "weight")); + layer.ln_1_w = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "weight")); + layer.ln_2_w = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "weight")); + layer.ff_i_w = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "weight")); + layer.ff_o_w = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "weight")); + layer.k_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "bias")); + layer.q_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "bias")); + layer.v_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "bias")); + layer.o_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "bias")); + layer.ln_1_b = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "bias")); + layer.ln_2_b = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "bias")); + layer.ff_i_b = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "bias")); + layer.ff_o_b = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "bias")); + } + } + + ggml_free(meta); + + new_clip->ctx_gguf = ctx; + + // measure mem requirement and allocate + { + new_clip->buf_compute_meta.resize(GGML_DEFAULT_GRAPH_SIZE * ggml_tensor_overhead() + ggml_graph_overhead()); + new_clip->compute_alloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(new_clip->backend)); + clip_image_f32_batch batch; + batch.size = 1; + ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch, nullptr, false); + ggml_gallocr_reserve(new_clip->compute_alloc, gf); + size_t compute_memory_buffer_size = ggml_gallocr_get_buffer_size(new_clip->compute_alloc, 0); + LOG_TEE("%s: compute allocated memory: %.2f MB\n", __func__, compute_memory_buffer_size /1024.0/1024.0); + } + + return new_clip; +} + +void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size) { + ctx_clip->load_image_size = load_image_size; +} + +struct clip_image_size * clip_image_size_init() { + struct clip_image_size * load_image_size = new struct clip_image_size(); + load_image_size->width = 448; + load_image_size->height = 448; + return load_image_size; +} + +struct clip_image_u8 * clip_image_u8_init() { + return new clip_image_u8(); +} + +struct clip_image_f32 * clip_image_f32_init() { + return new clip_image_f32(); +} + +void clip_image_u8_free(struct clip_image_u8 * img) { delete img; } +void clip_image_f32_free(struct clip_image_f32 * img) { delete img; } +void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { + if (batch->size > 0) { + delete[] batch->data; + batch->size = 0; + } +} +void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { + if (batch->size > 0) { + delete[] batch->data; + batch->size = 0; + } +} + +static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) { + img->nx = nx; + img->ny = ny; + img->buf.resize(3 * nx * ny); + memcpy(img->buf.data(), data, img->buf.size()); +} + +bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { + int nx, ny, nc; + auto * data = stbi_load(fname, &nx, &ny, &nc, 3); + if (!data) { + LOG_TEE("%s: failed to load image '%s'\n", __func__, fname); + return false; + } + 
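+    // decode succeeded: stb returns tightly packed RGB (3 channels were requested above),
+    // so copy it into the clip image and release the stb buffer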
build_clip_img_from_data(data, nx, ny, img); + stbi_image_free(data); + return true; +} + +bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) { + int nx, ny, nc; + auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, &nc, 3); + if (!data) { + LOG_TEE("%s: failed to decode image bytes\n", __func__); + return false; + } + build_clip_img_from_data(data, nx, ny, img); + stbi_image_free(data); + return true; +} + +// Linear interpolation between two points +inline float clip_lerp(float s, float e, float t) { + return s + (e - s) * t; +} +// Bilinear resize function +static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int target_width, int target_height) { + dst.nx = target_width; + dst.ny = target_height; + dst.buf.resize(3 * target_width * target_height); + + float x_ratio = static_cast(src.nx - 1) / target_width; + float y_ratio = static_cast(src.ny - 1) / target_height; + + for (int y = 0; y < target_height; y++) { + for (int x = 0; x < target_width; x++) { + float px = x_ratio * x; + float py = y_ratio * y; + int x_floor = static_cast(px); + int y_floor = static_cast(py); + float x_lerp = px - x_floor; + float y_lerp = py - y_floor; + + for (int c = 0; c < 3; c++) { + float top = clip_lerp( + static_cast(src.buf[3 * (y_floor * src.nx + x_floor) + c]), + static_cast(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]), + x_lerp + ); + float bottom = clip_lerp( + static_cast(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]), + static_cast(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]), + x_lerp + ); + dst.buf[3 * (y * target_width + x) + c] = static_cast(clip_lerp(top, bottom, y_lerp)); + } + } + } +} + +// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not +static void normalize_image_u8_to_f32(const clip_image_u8* src, clip_image_f32* dst, const float mean[3], const float std[3]) { + dst->nx = src->nx; + dst->ny = src->ny; + dst->buf.resize(src->buf.size()); + + for (size_t i = 0; i < src->buf.size(); ++i) { + int c = i % 3; // rgb + dst->buf[i] = (static_cast(src->buf[i]) / 255.0f - mean[c]) / std[c]; + } +} + +inline float clip(float x, float lower, float upper) { + return std::max(lower, std::min(x, upper)); +} + +static bool bicubic_resize(const clip_image_u8 &img, clip_image_u8 &dst, int target_width, int target_height) { + const int nx = img.nx; + const int ny = img.ny; + + dst.nx = target_width; + dst.ny = target_height; + dst.buf.resize(3 * target_width * target_height); + + float Cc; + float C[5]; + float d0, d2, d3, a0, a1, a2, a3; + int i, j, k, jj; + int x, y; + float dx, dy; + float tx, ty; + + tx = (float)nx / (float)target_width; + ty = (float)ny / (float)target_height; + + // Bicubic interpolation; adapted from ViT.cpp, inspired from : + // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36 + // -> https://en.wikipedia.org/wiki/Bicubic_interpolation + + for (i = 0; i < target_height; i++) { + for (j = 0; j < target_width; j++) { + x = (int)(tx * j); + y = (int)(ty * i); + + dx = tx * j - x; + dy = ty * i - y; + + for (k = 0; k < 3; k++) { + for (jj = 0; jj <= 3; jj++) { + d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k]; + d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - 
img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k]; + d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k]; + a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k]; + + a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3; + a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2; + a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3; + + C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx; + + d0 = C[0] - C[1]; + d2 = C[2] - C[1]; + d3 = C[3] - C[1]; + a0 = C[1]; + a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3; + a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2; + a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3; + Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy; + + const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f); + dst.buf[(i * target_width + j) * 3 + k] = float(Cc2); + } + } + } + } + + return true; +} + +// llava-1.6 type of resize_and_pad (black) +static void resize_and_pad_image(const clip_image_u8& image, clip_image_u8 &image_output, const std::pair& target_resolution) { + int target_width = target_resolution.first; + int target_height = target_resolution.second; + + float scale_w = static_cast(target_width) / image.nx; + float scale_h = static_cast(target_height) / image.ny; + + int new_width, new_height; + + if (scale_w < scale_h) { + new_width = target_width; + new_height = std::min(static_cast(std::ceil(image.ny * scale_w)), target_height); + } else { + new_height = target_height; + new_width = std::min(static_cast(std::ceil(image.nx * scale_h)), target_width); + } + + clip_image_u8 resized_image; + // bilinear_resize(image, resized_image, new_width, new_height); + bicubic_resize(image, resized_image, new_width, new_height); + + clip_image_u8 padded_image; + padded_image.nx = target_width; + padded_image.ny = target_height; + padded_image.buf.resize(3 * target_width * target_height, 0); // Initialize with black + + // Calculate padding offsets + int pad_x = (target_width - new_width) / 2; + int pad_y = (target_height - new_height) / 2; + + // Copy the resized image into the center of the padded buffer + for (int y = 0; y < new_height; ++y) { + for (int x = 0; x < new_width; ++x) { + for (int c = 0; c < 3; ++c) { + padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c]; + } + } + } + image_output = std::move(padded_image); +} + +/** + * Selects the best resolution from a list of possible resolutions based on the original size. + * + * @param original_size The original size of the image in the format (width, height). + * @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...]. + * @return The best fit resolution in the format (width, height). 
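+ *
+ * Illustrative example: for a 640x480 original and candidates (672, 336) and (672, 672),
+ * (672, 336) preserves at most a 448x336 downscale, while (672, 672) keeps the full
+ * 640x480 (effective resolution is capped at the original area), so (672, 672) is selected.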
+ */
+static std::pair<int, int> select_best_resolution(const std::pair<int, int> & original_size, const std::vector<std::pair<int, int>> & possible_resolutions) {
+    int original_width = original_size.first;
+    int original_height = original_size.second;
+    std::pair<int, int> best_fit;
+    int max_effective_resolution = 0;
+    int min_wasted_resolution = std::numeric_limits<int>::max();
+
+    for (const auto& resolution : possible_resolutions) {
+        int width = resolution.first;
+        int height = resolution.second;
+        float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
+        int downscaled_width = static_cast<int>(original_width * scale);
+        int downscaled_height = static_cast<int>(original_height * scale);
+        int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
+        int wasted_resolution = (width * height) - effective_resolution;
+        // LOG_TEE("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
+        if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
+            max_effective_resolution = effective_resolution;
+            min_wasted_resolution = wasted_resolution;
+            best_fit = resolution;
+        }
+    }
+
+    return best_fit;
+}
+
+static std::vector<clip_image_u8 *> divide_to_patches_u8(const clip_image_u8 & image, int patch_size) {
+    std::vector<clip_image_u8 *> patches;
+    int width = image.nx;
+    int height = image.ny;
+    for (int i = 0; i < height; i += patch_size) {
+        for (int j = 0; j < width; j += patch_size) {
+            clip_image_u8 *patch = clip_image_u8_init();
+            patch->nx = std::min(patch_size, width - j);
+            patch->ny = std::min(patch_size, height - i);
+            patch->buf.resize(3 * patch->nx * patch->ny);
+            for (int y = 0; y < patch->ny; ++y) {
+                for (int x = 0; x < patch->nx; ++x) {
+                    for (int c = 0; c < 3; ++c) {
+                        patch->buf[3 * (y * patch->nx + x) + c] = image.buf[3 * ((i + y) * width + (j + x)) + c];
+                    }
+                }
+            }
+            patches.push_back(patch);
+        }
+    }
+    return patches;
+}
+
+static int ensure_divide(int length, int patch_size) {
+    return std::max(static_cast<int>(std::round(static_cast<float>(length) / patch_size) * patch_size), patch_size);
+}
+
+static std::pair<int, int> uhd_find_best_resize(std::pair<int, int> original_size, int scale_resolution, int patch_size, bool allow_upscale = false) {
+    int width = original_size.first;
+    int height = original_size.second;
+    if ((width * height > scale_resolution * scale_resolution) || allow_upscale) {
+        float r = static_cast<float>(width) / height;
+        height = static_cast<int>(scale_resolution / std::sqrt(r));
+        width = static_cast<int>(height * r);
+    }
+    int best_width = ensure_divide(width, patch_size);
+    int best_height = ensure_divide(height, patch_size);
+    return std::make_pair(best_width, best_height);
+}
+
+static std::pair<int, int> uhd_get_refine_size(std::pair<int, int> original_size, std::pair<int, int> grid, int scale_resolution, int patch_size, bool allow_upscale = false) {
+    int width, height;
+    std::tie(width, height) = original_size;
+    int grid_x, grid_y;
+    std::tie(grid_x, grid_y) = grid;
+
+    int refine_width = ensure_divide(width, grid_x);
+    int refine_height = ensure_divide(height, grid_y);
+
+    int grid_width = refine_width / grid_x;
+    int grid_height = refine_height / grid_y;
+
+    // auto best_grid_size = find_best_resize(std::make_tuple(grid_width, grid_height), scale_resolution, patch_size, allow_upscale); (old line)
+    auto best_grid_size = uhd_find_best_resize(std::make_pair(grid_width, grid_height), scale_resolution,
patch_size, allow_upscale); // (new line) => fixes conversion for make_tuple to make_pair + int best_grid_width, best_grid_height; + std::tie(best_grid_width, best_grid_height) = best_grid_size; + + // std::pair refine_size = std::make_tuple(best_grid_width * grid_x, best_grid_height * grid_y); (old line) + std::pair refine_size = std::make_pair(best_grid_width * grid_x, best_grid_height * grid_y); // (new line) + return refine_size; +} + +inline int clip(int x, int lower, int upper) { + return std::max(lower, std::min(x, upper)); +} + +static std::pair uhd_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) { + std::vector candidate_split_grids_nums; + for (int i : {multiple - 1, multiple, multiple + 1}) { + if (i == 1 || i > max_slice_nums) { + continue; + } + candidate_split_grids_nums.push_back(i); + } + + std::vector> candidate_grids; + for (int split_grids_nums : candidate_split_grids_nums) { + int m = 1; + while (m <= split_grids_nums) { + if (split_grids_nums % m == 0) { + candidate_grids.emplace_back(m, split_grids_nums / m); + } + ++m; + } + } + + std::pair best_grid{1, 1}; + float min_error = std::numeric_limits::infinity(); + for (const auto& grid : candidate_grids) { + float error = std::abs(log_ratio - std::log(1.0 * grid.first / grid.second)); + if (error < min_error) { + best_grid = grid; + min_error = error; + } + } + return best_grid; +} + +// inspired from LLaVA-UHD: +// -> https://arxiv.org/pdf/2403.11703 +// -> https://github.com/thunlp/LLaVA-UHD +// -> https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118 +static std::vector> uhd_slice_image(const clip_image_u8 * img, const int max_slice_nums=9, const int scale_resolution=448, const int patch_size=14) { + const std::pair original_size={img->nx,img->ny}; + const int original_width = img->nx; + const int original_height = img->ny; + const float log_ratio = log(1.0*original_width/original_height); + const float ratio = 1.0 * original_width * original_height/ (scale_resolution * scale_resolution); + const int multiple = fmin(ceil(ratio), max_slice_nums); + + std::vector> images; + LOG_TEE("%s: multiple %d\n", __func__, multiple); + images.push_back(std::vector()); + + if (multiple <= 1) { + auto best_size = uhd_find_best_resize(original_size, scale_resolution, patch_size, true); + clip_image_u8 * source_image = clip_image_u8_init(); + bicubic_resize(*img, *source_image, best_size.first, best_size.second); + // source_image = image.resize(best_size, Image.Resampling.BICUBIC) + images[images.size()-1].push_back(source_image); + } + else if (multiple > 1) { + auto best_size = uhd_find_best_resize(original_size, scale_resolution, patch_size); + clip_image_u8 * source_image = clip_image_u8_init(); + bicubic_resize(*img, *source_image, best_size.first, best_size.second); + // source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC) + LOG_TEE("%s: image_size: %d %d; source_image size: %d %d\n", __func__, img->nx, img->ny, best_size.first, best_size.second); + images[images.size()-1].push_back(source_image); + + std::pair best_grid = uhd_best_grid(max_slice_nums, multiple, log_ratio); + LOG_TEE("%s: image_size: %d %d; best_grid: %d %d\n", __func__, img->nx, img->ny, best_grid.first, best_grid.second); + + auto refine_size = uhd_get_refine_size(original_size, best_grid, scale_resolution, patch_size, true); + clip_image_u8 * refine_image = clip_image_u8_init(); + bicubic_resize(*img, *refine_image, 
refine_size.first, refine_size.second); + + LOG_TEE("%s: refine_image_size: %d %d; refine_size: %d %d\n", __func__, refine_image->nx, refine_image->ny, refine_size.first, refine_size.second); + + // split_to_patches + int width = refine_image->nx; + int height = refine_image->ny; + int grid_x = int(width / best_grid.first); + int grid_y = int(height / best_grid.second); + for (int patches_i = 0, ic = 0; patches_i < height && ic < best_grid.second; patches_i += grid_y, ic += 1){ + images.push_back(std::vector()); + for(int patches_j = 0, jc = 0; patches_j < width && jc < best_grid.first; patches_j += grid_x, jc += 1){ + clip_image_u8 * patch = clip_image_u8_init(); + patch->nx = grid_x; + patch->ny = grid_y; + patch->buf.resize(3 * patch->nx * patch->ny); + for (int y = patches_i; y < patches_i + grid_y; ++y) { + for (int x = patches_j; x < patches_j + grid_x; ++x) { + const int i = 3 * (y * refine_image->nx + x); + const int j = 3 * ((y-patches_i) * patch->nx + (x-patches_j)); + patch->buf[j] = refine_image->buf[i]; + patch->buf[j+1] = refine_image->buf[i+1]; + patch->buf[j+2] = refine_image->buf[i+2]; + } + } + images[images.size()-1].push_back(patch); + } + } + } + return images; +} + +int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) { + const int max_slice_nums=9; + const int scale_resolution=448; + const int original_width = ctx_clip->load_image_size->width; + const int original_height = ctx_clip->load_image_size->height; + const float log_ratio = log(1.0*original_width/original_height); + const float ratio = 1.0 * original_width * original_height/ (scale_resolution * scale_resolution); + const int multiple = fmin(ceil(ratio), max_slice_nums); + std::pair best_grid = uhd_best_grid(max_slice_nums, multiple, log_ratio); + return best_grid.first; +} + +// returns the normalized float tensor for llava-1.5, for spatial_unpad with anyres processing for llava-1.6 it returns the normalized image patch tensors as a vector +// res_imgs memory is being allocated here, previous allocations will be freed if found +bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch * res_imgs) { + + if(clip_is_minicpmv(ctx)){ + int max_slice_nums = 9; + std::vector> imgs = uhd_slice_image(img, max_slice_nums); + res_imgs->size = 0; + for (size_t i = 0; i < imgs.size(); ++i){ + res_imgs->size += imgs[i].size(); + } + res_imgs->data = new clip_image_f32[res_imgs->size]; + int idx = 0; + for (size_t i = 0; i < imgs.size(); ++i) { + for (size_t j = 0; j < imgs[i].size(); ++j) { + LOG_TEE("%s: %d %d\n", __func__,imgs[i][j]->nx,imgs[i][j]->ny); + clip_image_f32 * res = clip_image_f32_init(); + normalize_image_u8_to_f32(imgs[i][j], res, ctx->image_mean, ctx->image_std); + res_imgs->data[idx++] = *res; + clip_image_f32_free(res); + } + } + return true; + } + + bool pad_to_square = true; + if (!ctx->has_vision_encoder) { + LOG_TEE("This gguf file seems to have no vision encoder\n"); + return false; + } + auto & params = ctx->vision_model.hparams; + // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing + if (strcmp(params.mm_patch_merge_type, "spatial_unpad") == 0) { + pad_to_square = false; + } + // free the previous res_imgs if any set + if (res_imgs->size > 0) { + clip_image_f32_batch_free(res_imgs); + } + res_imgs->data = nullptr; + res_imgs->size = 0; + + // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104) + // see 
https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156 + + clip_image_u8 * temp = clip_image_u8_init(); // we will keep the input image data here temporarily + if (pad_to_square && img->nx != img->ny) { + int longer_side = std::max(img->nx, img->ny); + temp->nx = longer_side; + temp->ny = longer_side; + temp->buf.resize(3 * longer_side * longer_side); + const uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA (this is the mean rgb color * 255) + + // fill with background color + for (size_t i = 0; i < temp->buf.size(); i++) { + temp->buf[i] = bc[i % 3]; + } + + // copy from the input image + for (int y = 0; y < img->ny; y++) { + for (int x = 0; x < img->nx; x++) { + const int i = 3 * (y * img->nx + x); + const int j = 3 * (y * temp->nx + x); + temp->buf[j] = img->buf[i]; + temp->buf[j+1] = img->buf[i+1]; + temp->buf[j+2] = img->buf[i+2]; + } + } + } else { + if (params.image_grid_pinpoints[0] != 0) { + // "spatial_unpad" with "anyres" processing for llava-1.6 + std::vector> possible_resolutions; + for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) { + possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]}); + } + std::pair best_resolution = select_best_resolution({img->nx, img->ny}, possible_resolutions); + // clip_image_save_to_bmp(*img, "input.bmp"); + resize_and_pad_image(*img, *temp, best_resolution); // we do not pad with mean-bg color anymore in llava-1.6 + // clip_image_save_to_bmp(*temp, "resized.bmp"); + // visually verify normalized image: + // normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std); + // { + // clip_image_u8 * temp2 = clip_image_u8_init(); + // clip_image_convert_f32_to_u8(*res, *temp2); + // clip_image_save_to_bmp(*temp2, "resized_normalized_f32.bmp"); + // clip_image_u8_free(temp2); + // } + + std::vector patches = divide_to_patches_u8(*temp, params.image_size); // prepare spatial sorted main patches of image_size each (336 in llava-1.6) + + clip_image_u8 *image_original_resize = clip_image_u8_init(); + // bilinear_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square + bicubic_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square + patches.insert(patches.begin(), image_original_resize); + // clip_image_f32_batch_init(patches.size()); + res_imgs->size = patches.size(); + res_imgs->data = new clip_image_f32[res_imgs->size]; + int num=0; + for (auto& patch : patches) { + normalize_image_u8_to_f32(patch, &res_imgs->data[num], ctx->image_mean, ctx->image_std); + num++; + } + + for (size_t i = 0; i < patches.size(); i++) { + // LOG_TEE("patch %d: %d %d\n", i, patches[i]->nx, patches[i]->ny); + clip_image_u8_free(patches[i]); + } + + clip_image_u8_free(temp); + + return true; + } else { + temp->nx = img->nx; + temp->ny = img->ny; + temp->buf.resize(img->buf.size()); + memcpy(temp->buf.data(), img->buf.data(), temp->buf.size()); + } + } + + const int nx = temp->nx; + const int ny = temp->ny; + // clip_image_save_to_bmp(*temp, "resized_vanilla.bmp"); + + const int nx2 = ctx->vision_model.hparams.image_size; + const int ny2 = ctx->vision_model.hparams.image_size; + clip_image_f32 * res = clip_image_f32_init(); + res->nx = nx2; + res->ny = ny2; + res->buf.resize(3 * nx2 * ny2); + + const float scale = std::max(nx, ny) / 
(float)ctx->vision_model.hparams.image_size; + + const int nx3 = int(nx / scale + 0.5f); + const int ny3 = int(ny / scale + 0.5f); + + const auto & m3 = ctx->image_mean; // {0.48145466f, 0.4578275f, 0.40821073f}; + const auto & s3 = ctx->image_std; // {0.26862954f, 0.26130258f, 0.27577711f}; + + for (int y = 0; y < ny3; y++) { + for (int x = 0; x < nx3; x++) { + for (int c = 0; c < 3; c++) { + // linear interpolation + const float sx = (x + 0.5f) * scale - 0.5f; + const float sy = (y + 0.5f) * scale - 0.5f; + + const int x0 = std::max(0, (int)std::floor(sx)); + const int y0 = std::max(0, (int)std::floor(sy)); + + const int x1 = std::min(x0 + 1, nx - 1); + const int y1 = std::min(y0 + 1, ny - 1); + + const float dx = sx - x0; + const float dy = sy - y0; + + const int j00 = 3 * (y0 * nx + x0) + c; + const int j01 = 3 * (y0 * nx + x1) + c; + const int j10 = 3 * (y1 * nx + x0) + c; + const int j11 = 3 * (y1 * nx + x1) + c; + + const float v00 = temp->buf[j00]; + const float v01 = temp->buf[j01]; + const float v10 = temp->buf[j10]; + const float v11 = temp->buf[j11]; + + const float v0 = v00 * (1.0f - dx) + v01 * dx; + const float v1 = v10 * (1.0f - dx) + v11 * dx; + + const float v = v0 * (1.0f - dy) + v1 * dy; + + const uint8_t v2 = std::min(std::max(std::round(v), 0.0f), 255.0f); + + const int i = 3 * (y * nx3 + x) + c; + + res->buf[i] = ((float(v2) / 255.0f) - m3[c]) / s3[c]; + } + } + } + clip_image_u8_free(temp); + + // { + // clip_image_u8 * temp2 = clip_image_u8_init(); + // clip_image_convert_f32_to_u8(*res, *temp2); + // clip_image_save_to_bmp(*temp2, "resized_normalized_f32_vanilla.bmp"); + // clip_image_u8_free(temp2); + // } + // res_imgs.push_back(res); + + res_imgs->size = 1; + res_imgs->data = new clip_image_f32[res_imgs->size]; + res_imgs->data[0] = *res; + clip_image_f32_free(res); + + return true; +} + +ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) { + return ctx->vision_model.image_newline; +} + +void clip_free(clip_ctx * ctx) { + ggml_free(ctx->ctx_data); + gguf_free(ctx->ctx_gguf); + + ggml_backend_buffer_free(ctx->params_buffer); + ggml_backend_free(ctx->backend); + ggml_gallocr_free(ctx->compute_alloc); + delete ctx; +} + +size_t clip_embd_nbytes(const struct clip_ctx * ctx) { + return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float); +} + +int32_t clip_image_size(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.image_size; +} + +int32_t clip_patch_size(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.patch_size; +} + +int32_t clip_hidden_size(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.hidden_size; +} + +const char * clip_patch_merge_type(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.mm_patch_merge_type; +} + +const int32_t * clip_image_grid(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.image_grid_pinpoints; +} + +int clip_n_patches(const struct clip_ctx * ctx) { + const auto & params = ctx->vision_model.hparams; + + int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size); + + if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2) { + n_patches /= 4; + } else if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) { + if (ctx->minicpmv_version == 2) { + n_patches = 96; + } + else if (ctx->minicpmv_version == 3) { + n_patches = 64; + } + } + + return n_patches; +} + +static std::vector>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector> & pos) { + 
assert(embed_dim % 2 == 0); + int H = pos.size(); + int W = pos[0].size(); + + std::vector omega(embed_dim / 2); + for (int i = 0; i < embed_dim / 2; ++i) { + omega[i] = 1.0 / pow(10000.0, static_cast(i) / (embed_dim / 2)); + } + + std::vector>> emb(H, std::vector>(W, std::vector(embed_dim))); + for (int h = 0; h < H; ++h) { + for (int w = 0; w < W; ++w) { + for (int d = 0; d < embed_dim / 2; ++d) { + float out_value = pos[h][w] * omega[d]; + emb[h][w][d] = sin(out_value); + emb[h][w][d + embed_dim / 2] = cos(out_value); + } + } + } + + return emb; +} + +static std::vector>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector>> & grid) { + assert(embed_dim % 2 == 0); + std::vector>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2) + std::vector>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2) + + int H = emb_h.size(); + int W = emb_h[0].size(); + std::vector>> emb(H, std::vector>(W, std::vector(embed_dim))); + + for (int h = 0; h < H; ++h) { + for (int w = 0; w < W; ++w) { + for (int d = 0; d < embed_dim / 2; ++d) { + emb[h][w][d] = emb_h[h][w][d]; + emb[h][w][d + embed_dim / 2] = emb_w[h][w][d]; + } + } + } + return emb; +} + +static std::vector> get_2d_sincos_pos_embed(int embed_dim, const std::pair image_size) { + int grid_h_size = image_size.first; + int grid_w_size = image_size.second; + + std::vector grid_h(grid_h_size); + std::vector grid_w(grid_w_size); + + for (int i = 0; i < grid_h_size; ++i) { + grid_h[i] = static_cast(i); + } + for (int i = 0; i < grid_w_size; ++i) { + grid_w[i] = static_cast(i); + } + + std::vector> grid(grid_h_size, std::vector(grid_w_size)); + for (int h = 0; h < grid_h_size; ++h) { + for (int w = 0; w < grid_w_size; ++w) { + grid[h][w] = grid_w[w]; + } + } + std::vector>> grid_2d = {grid, grid}; + for (int h = 0; h < grid_h_size; ++h) { + for (int w = 0; w < grid_w_size; ++w) { + grid_2d[0][h][w] = grid_h[h]; + grid_2d[1][h][w] = grid_w[w]; + } + } + + std::vector>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d); + + int H = image_size.first; + int W = image_size.second; + std::vector> pos_embed_2d(H * W, std::vector(embed_dim)); + for (int h = 0; h < H; ++h) { + for (int w = 0; w < W; ++w) { + pos_embed_2d[w * H + h] = pos_embed_3d[h][w]; + } + } + + return pos_embed_2d; +} + +bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) { + if (!ctx->has_vision_encoder) { + LOG_TEE("This gguf file seems to have no vision encoder\n"); + return false; + } + + clip_image_f32_batch imgs{}; + imgs.size = 1; + imgs.data = img; + return clip_image_batch_encode(ctx, n_threads, &imgs, vec); +} + +bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec) { + if (!ctx->has_vision_encoder) { + LOG_TEE("This gguf file seems to have no vision encoder\n"); + return false; + } + + int batch_size = imgs->size; + if (ctx->has_llava_projector) { + GGML_ASSERT(batch_size == 1); // TODO: support multiple images + } + if (ctx->has_minicpmv_projector) { + GGML_ASSERT(batch_size == 1); + } + + // build the inference graph + ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true); + ggml_gallocr_alloc_graph(ctx->compute_alloc, gf); + + // set inputs + const auto & model = ctx->vision_model; + const auto & hparams = model.hparams; + + const int image_size = hparams.image_size; + int image_size_width = image_size; + int image_size_height = image_size; 
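+    // note: minicpmv slices are generally not square, so the actual slice
+    // dimensions override the model's square image_size below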
+    if (ctx->has_minicpmv_projector) {
+        image_size_width = imgs->data[0].nx;
+        image_size_height = imgs->data[0].ny;
+    }
+    const int patch_size = hparams.patch_size;
+    const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size));
+    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);
+    if(ctx->load_image_size==nullptr){
+        ctx->load_image_size= clip_image_size_init();
+    }
+    const int pos_w = ctx->load_image_size->width/patch_size;
+    const int pos_h = ctx->load_image_size->height/patch_size;
+
+    {
+        struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");
+        float * data = (float *)malloc(ggml_nbytes(inp_raw));
+
+        for (size_t i = 0; i < imgs->size; i++) {
+            const int nx = imgs->data[i].nx;
+            const int ny = imgs->data[i].ny;
+            if (!ctx->has_minicpmv_projector) {
+                GGML_ASSERT(nx == image_size && ny == image_size);
+            }
+
+            const int n = nx * ny;
+
+            for (int b = 0; b < batch_size; b++) {
+                for (int k = 0; k < 3; k++) {
+                    for (int y = 0; y < ny; y++) {
+                        for (int x = 0; x < nx; x++) {
+                            data[(b * 3 * n) + k * n + y * nx + x] = imgs->data[b].buf[3 * (y * nx + x) + k];
+                        }
+                    }
+                }
+            }
+        }
+        ggml_backend_tensor_set(inp_raw, data, 0, ggml_nbytes(inp_raw));
+        free(data);
+    }
+    if (ctx->has_minicpmv_projector) {
+        {
+            // inspired from siglip:
+            //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit
+            //    -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316
+            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
+            int* positions_data = (int*)malloc(ggml_nbytes(positions));
+            int bucket_coords_h[70];
+            int bucket_coords_w[70];
+            for (int i = 0; i < pos_h; i++){
+                bucket_coords_h[i] = std::floor(70.0*i/pos_h);
+            }
+            for (int i = 0; i < pos_w; i++){
+                bucket_coords_w[i] = std::floor(70.0*i/pos_w);
+            }
+            for (int i = 0, id = 0; i < pos_h; i++){
+                for (int j = 0; j < pos_w; j++){
+                    positions_data[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j];
+                }
+            }
+            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
+            free(positions_data);
+        }
+
+        {
+            // inspired from resampler of Qwen-VL:
+            //    -> https://huggingface.co/Qwen/Qwen-VL/tree/main
+            //    -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23
+            struct ggml_tensor * pos_embed = ggml_graph_get_tensor(gf, "pos_embed");
+            int embed_dim = 4096;
+            if (ctx->minicpmv_version == 2) {
+                embed_dim = 4096;
+            }
+            else if (ctx->minicpmv_version == 3) {
+                embed_dim = 3584;
+            }
+            auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h));
+
+            float * pos_embed_data = (float *)malloc(ggml_nbytes(pos_embed));
+            for (int i = 0; i < pos_w * pos_h; ++i) {
+                for (int j = 0; j < embed_dim; ++j) {
+                    pos_embed_data[i * embed_dim + j] = pos_embed_t[i][j];
+                }
+            }
+
+            ggml_backend_tensor_set(pos_embed, pos_embed_data, 0, ggml_nbytes(pos_embed));
+            free(pos_embed_data);
+        }
+    }
+    else {
+        {
+            if (ctx->has_class_embedding) {
+                struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");
+
+                void* zero_mem = malloc(ggml_nbytes(embeddings));
+                memset(zero_mem, 0, ggml_nbytes(embeddings));
+                ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings));
+                free(zero_mem);
+            }
+        }
+
+        {
+            struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
+
+            int* positions_data = (int*)malloc(ggml_nbytes(positions));
+            for (int i = 0; i < num_positions; i++) {
+                positions_data[i] = i;
+            }
+            ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
+            free(positions_data);
+        }
+
+        {
+            struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
+            int* patches_data = (int*)malloc(ggml_nbytes(patches));
+ for (int i = 0; i < num_patches; i++) { + patches_data[i] = i + 1; + } + ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches)); + free(patches_data); + } + } + + if (ggml_backend_is_cpu(ctx->backend)) { + ggml_backend_cpu_set_n_threads(ctx->backend, n_threads); + } + +#ifdef GGML_USE_METAL + if (ggml_backend_is_metal(ctx->backend)) { + ggml_backend_metal_set_n_cb(ctx->backend, n_threads); + } +#endif + + ggml_backend_graph_compute(ctx->backend, gf); + + // the last node is the embedding tensor + struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 1]; + + // copy the embeddings to the location passed by the user + ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings)); + + return true; +} + +bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) { + ggml_type type = GGML_TYPE_Q4_1; + + assert(itype < GGML_TYPE_COUNT); + type = static_cast(itype); + + auto * ctx_clip = clip_model_load(fname_inp, 2); + + const auto & ctx_src = ctx_clip->ctx_gguf; + const auto & ctx_data = ctx_clip->ctx_data; + + auto * ctx_out = gguf_init_empty(); + gguf_set_kv(ctx_out, ctx_src); + gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); + gguf_set_val_u32(ctx_out, "general.file_type", itype); + + auto fout = std::ofstream(fname_out, std::ios::binary); + + const int n_tensors = gguf_get_n_tensors(ctx_src); + + for (int i = 0; i < n_tensors; ++i) { + const char * name = gguf_get_tensor_name(ctx_src, i); + struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); + gguf_add_tensor(ctx_out, cur); + } + + const size_t meta_size = gguf_get_meta_size(ctx_out); + for (size_t i = 0; i < meta_size; ++i) { + fout.put(0); + } + + // regexes of tensor names to be quantized + const std::vector k_names = { + ".*weight", + }; + + std::vector work(512); + std::vector conv_buf(512); + size_t total_size_org = 0; + size_t total_size_new = 0; + + for (int i = 0; i < n_tensors; ++i) { + const std::string name = gguf_get_tensor_name(ctx_src, i); + struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str()); + + enum ggml_type new_type; + void * new_data; + size_t new_size; + + bool quantize = false; + for (const auto & s : k_names) { + if (std::regex_match(name, std::regex(s))) { + quantize = true; + break; + } + } + + // quantize only 2D tensors + quantize &= (ggml_n_dims(cur) == 2); + + if (quantize) { + new_type = type; + if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != std::string::npos) { + new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type + // LOG_TEE("%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type)); + } + const size_t n_elms = ggml_nelements(cur); + float * f32_data; + + switch (cur->type) { + case GGML_TYPE_F32: + f32_data = (float *)cur->data; + break; + case GGML_TYPE_F16: + if (conv_buf.size() < n_elms) { + conv_buf.resize(n_elms); + } + for (size_t j = 0; j < n_elms; ++j) { + conv_buf[j] = ggml_fp16_to_fp32(((ggml_fp16_t *)cur->data)[j]); + } + f32_data = (float *)conv_buf.data(); + break; + default: + LOG_TEE("Please use an input file in f32 or f16\n"); + gguf_free(ctx_out); + return false; + } + + if (work.size() < n_elms * 4) { + work.resize(n_elms * 4); + } + new_data = work.data(); + + new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr); + } else { + new_type = cur->type; + new_data = cur->data; + new_size = ggml_nbytes(cur); + } + const size_t orig_size = ggml_nbytes(cur); + total_size_org += orig_size; + 
total_size_new += new_size; + gguf_set_tensor_type(ctx_out, name.c_str(), new_type); + gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size); + fout.write((const char *)new_data, new_size); + size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size; + for (size_t j = 0; j < pad; ++j) { + fout.put(0); + } + + LOG_TEE("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize, + orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); + } + + // go back to beginning of file and write the updated metadata + fout.seekp(0, std::ios::beg); + std::vector meta(meta_size); + gguf_get_meta_data(ctx_out, meta.data()); + fout.write((const char *)meta.data(), meta_size); + + fout.close(); + + clip_free(ctx_clip); + gguf_free(ctx_out); + + { + LOG_TEE("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0); + LOG_TEE("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0); + } + + return true; +} + +int clip_n_mmproj_embd(const struct clip_ctx * ctx) { + if (ctx->proj_type == PROJECTOR_TYPE_LDP) { + return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0]; + } + if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) { + return ctx->vision_model.mm_model_peg_0_b->ne[0]; + } + if (ctx->proj_type == PROJECTOR_TYPE_MLP) { + return ctx->vision_model.mm_2_b->ne[0]; + } + if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) { + return ctx->vision_model.mm_3_b->ne[0]; + } + if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) { + if (ctx->minicpmv_version == 2) { + return 4096; + } + else if (ctx->minicpmv_version == 3) { + return 3584; + } + } + + std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type]; + throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str())); +} + +int clip_is_minicpmv(const struct clip_ctx * ctx) { + if (ctx->has_minicpmv_projector) { + return ctx->minicpmv_version; + } + return 0; +} diff --git a/examples/xgenmm/clip.h b/examples/xgenmm/clip.h new file mode 100644 index 000000000..f97faf310 --- /dev/null +++ b/examples/xgenmm/clip.h @@ -0,0 +1,98 @@ +/* +08/18/2024 - Yutong - The file is adpated from examples/llava/llava.h in the llama.cpp repository. 
+*/ + +#ifndef CLIP_H +#define CLIP_H + +#include +#include + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define CLIP_API __declspec(dllexport) +# else +# define CLIP_API __declspec(dllimport) +# endif +# else +# define CLIP_API __attribute__ ((visibility ("default"))) +# endif +#else +# define CLIP_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +struct clip_ctx; + +struct clip_image_size { + int width; + int height; +}; + +struct clip_image_u8_batch { + struct clip_image_u8 * data; + size_t size; +}; + +struct clip_image_f32_batch { + struct clip_image_f32 * data; + size_t size; +}; + +CLIP_API struct clip_ctx * clip_model_load (const char * fname, int verbosity); +CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity); + +CLIP_API void clip_free(struct clip_ctx * ctx); + +CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx); + +CLIP_API int32_t clip_image_size (const struct clip_ctx * ctx); +CLIP_API int32_t clip_patch_size (const struct clip_ctx * ctx); +CLIP_API int32_t clip_hidden_size(const struct clip_ctx * ctx); + +// TODO: should be enum, not string +CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx); + +CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx); + +CLIP_API int clip_n_patches (const struct clip_ctx * ctx); +CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx); + +CLIP_API int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip); +CLIP_API void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size); + +CLIP_API struct clip_image_size * clip_image_size_init(); +CLIP_API struct clip_image_u8 * clip_image_u8_init (); +CLIP_API struct clip_image_f32 * clip_image_f32_init(); + +CLIP_API void clip_image_u8_free (struct clip_image_u8 * img); +CLIP_API void clip_image_f32_free(struct clip_image_f32 * img); +CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch * batch); +CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch * batch); + +CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); + +/** interpret bytes as an image file with length bytes_length, and use the result to populate img */ +CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img); + +/** preprocess img and store the result in res_imgs, pad_to_square may be overridden to false depending on model configuration */ +CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs ); + +CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx); + +CLIP_API bool clip_image_encode (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec); +CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec); + +CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype); + +CLIP_API int clip_is_minicpmv(const struct clip_ctx * ctx); + +#ifdef __cplusplus +} +#endif + +#endif // CLIP_H diff --git a/examples/xgenmm/debug.py b/examples/xgenmm/debug.py new file mode 100644 index 000000000..9a503a42c --- /dev/null +++ b/examples/xgenmm/debug.py @@ -0,0 +1,15 @@ +from torchvision.transforms import Resize +from torchvision.transforms import InterpolationMode +from PIL import Image +import numpy as np + +n_px = 
384 +resize_func = Resize((n_px, n_px), interpolation=InterpolationMode.BICUBIC, antialias=True) + +img_dir = "./imgs" +image_path_1 = f'{img_dir}/image-1d100e9-1.jpg' +image_path_2 = f'{img_dir}/image-1d100e9.jpg' +image_1 = Image.open(image_path_1).convert('RGB') +image_2 = Image.open(image_path_2).convert('RGB') + +print(np.asarray(resize_func(image_2))[:5, :10, 0]) \ No newline at end of file diff --git a/examples/xgenmm/imgs/image-1d100e9-1.jpg b/examples/xgenmm/imgs/image-1d100e9-1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..590e926932279a9e1849768fddb62d90ff309899 GIT binary patch literal 53303 zcmbTdbx<8$^euSdUNpEvu;2v0KyWS)+}$m>1b26L*AUz#xCe)eySrO}-~=7M-h3ycueDddExm04Fr>w$!~rlc007MU0(e^md<7uF!y~}MAtE3k zAR!?lqu`*TeE5Juh=q-gLqbGGN4d!QN z=K%lL2@Dbv63Pb@0#sB2@F(I=;Q#IR)(yZ!g!u$)2f|PQU@>8Um@ses0J8V{goF8? z2Kc`l3@i`?2akY=g#6(>pb-NA3j+kgf`D*vAkcfX&--}*2onzL6PqwRw$e8Q3I`l; zKtdiOrATcLuJZI>Dt05sKqO>5d;&ruY8qNP`p+DkU%0q=c)yB@iAzXINvo);scUFz zX&alEnweWzS~)qpxVpJ}cm@TBgocGjL?$LBr=+H(XJqCV6c!bil$Mp()i*RYHMg|3 z_4f4-3=TnuM`mW{<`)*1mRGiScK7xV4v&scF0Za{Ztw0N{yqMO3kCrEU$EY{{|ngv z2N&i$E?5u<2txP|7YwZ1`wGMa!F^(b#}ZaT_~w930S-XK5lP6a?LnesSN@A@|3UWu4_M&;5wiaW*#C=b1%LvCd4G98OaKIMyS);vv=ZoktV0YVVVi6g z36JB#fnJ1zsOza{aS$nl<0O2Wt3rS&jN^G<&Yeb5@wE z%v_i@7#}s(b}3JF<-_a;NA8yB*nTe>mVbyxH#_0EIn{|@(YHdLWk_V(ujjiCSlx`B z%d~TqWy3l4Lq_uGG}tZKq8`liasJoZ)sYZ+E@;w^;_FwWiX5$)kSH!MWduP#)t+^;O1%AX+5;(ab_>hs3_gYw1izh(^rBH**`ei>6#9J;zio^ zgHCx54wSuzfdrAh@iGYGI)s<;l5setJg5ANTSOMfcs6KzTbf3q4g%he3t*1YOWR(# z@mDdP>q&F=Yx0n54m)Te7jMQUlgy{`b+4*W2zzWCL_W67yFeSzjvL7|6g^BJ5LNre#(>J#o#AG(Xt6261t@;*D++8o%QDvcOw7u;TM})HmOg z1I(`o%g?yd9c5Yt+-V8}cPyy2D5694gLiQj*_RPMbDVVuY=`~)BQofkFc)*x3UXM@ z5o|p01$uFnp@(6(s%1x=0nw_!uh#&TaN#ZDvhfk%ba|#!V zs%?!V=S)H_CV$;l`C;ci>Ec^}>xX)PEpLERq7wH5y)T>yG!ja2+9Pu7n$F2Ag1(VQ zWtJtKYb}`?D+(j3SAL|8wbuET#o{csR6K(wM(!n+3+GJ3gIYpEb7~I(sNMXLE6|-N zuhg@==5SCfL4yA)4iHkL$ykoYr^?nVy?1%!6`4QVCVk4<$3jT*27n$EFY@aCNx&_t zmp-EbtT_Et$w_`nCKSPEpdImTt>RX~f>Z6$wU^*|_L|qicAowOp--Z`;`mn$y^|6j z^nS8o`HBlquUZkidDv;DOID}PjuO~mOw2{Lt;3rPuL>mxUxE1+Y_BPjC`^TyBvgiO zUtQEY-j|93}a=BLDG3%u_v#nl)d$5)ej~n6{BIRW3VwdV@4m%3K!3e z1Fz@iq5lbzLsI?{hEYLe?~9r3&4%;a52^feE*n8n7a#U5$eK6{n( zIt*fgG!RS>^ZSn|mveM;eB$&!~ z!*-_4*HrxuQU@5kwP#AlC;ScmzpzNrhK@W3b&Y(hC2lgR3hbrvi9&?Oak_42s>X9Bm;_Ss zP@I?tsLI2YZ%UMt=RnqMZd2mNofuy}nJCrpRGKeYWW9&+UAmYvzi#AQEvxfG9Vhhe z`CEpW&b`okuChTpFcC3Z6vf=jwIM)gTj0CP?q>+)Z{u_ z#}C_(gWw5hXDK?`cbZb}6tG9Z%vi^u?%04hgrB98yE?O;#Mcto*$zoV#F;qHO9utw zYcLazt^AJYwkzW_lkvx2LlJ?TW837378g1?$lwPO*hGdwH{Q?4$ z7Wa=IG4*i?zfy#O%64Nr77M&-=SK84Efa~6Fl7DT03Pm;yo&rY_9#f-lP*Kz2Jagn zynZXB3%D z?0u?DO^lrbv>e1MT3P_k>CPlLj57gzN^{dW7P-KhDgwa<^GG_tv!&Ng?3@eF`^-#t zA^E-;U`4qUBViY(1B?R>-UBYgNGxywN)lGQ$^e?!8zot|!rV-D$!`*K)29+vAhH;o zp9`^b-P)+1IC8O}ptx_6jKoTc3Le(qky_dUF?jzamcM3<8?U=B_PKg&R*3xJ6H9~S z7Vf3~*~r0bR0u_Vq#6G+ow-x;@H4*@V%pf%PWe;|M^%Fr@bg1fuHLYapF_y z^}}JrK~7(3U*CphJSSOl^8I>K*f+SlZV*g<3HCFYRc_o?GV^e@p~X&PUH2rtPNl<> z`*VUV{|!yD6c+1c{Ff|(Hvm?-ewtsRz=3tR&0NE^7*^~Rx6<<|>FFRTNj<{&0+SSp z=y%;$A&rm@CwWb%Y+N?#`GnP&r_F5Ae*XyKE;2q@oVM;2dd1duM|LNc!H-GR)`zTo zQFa^Dvjtw!rRfddh^MdsUTF%kT%YU4OZA}3 z-SFx?&-k$I9~Ht@l1pfMxs&g6v%6o!hiT7%H;v_cQ^+ z(v!5laPp1;6DB%77M_@ua8lnlGzK%dT;6UY<;8{n%f8?M0cIa|vLkD^Kc5NlU#}O> zMQT^7$aBT7f%5jA*LoX`Se4E6RNod|cQU%|>};*07vOCv;sIPkPz3IwE>9D}Xh#p8 z`=Ur)nTC*mEuf&DXI>Qr;vgmbrLA3VomC}sRc(8i1?^{aMG5Ifl8zZJ%)08e{jR=h zp9%$YxXME5xFNCMpEW%EH~By8#5GOx1*%@i+;RlIp&BnHWLT zu`yfQCqgB1^4*07Z&SoMZxTC~==2|MWJ1d-k2|IM9W|vHX52PQ$f}{MtE;%}Hb`^c zY^BG4U~FO<%xS4ieDhPwwKF=6ybmkIjV#Q7C{+zF5|Eju%JVN)ALvYB#k55Hg(+Ka 
z1gxsN(2TQ<<3;`beh%i&9@0!qM};5{(o9kX&F0MKWn6%vE@YtPwX)IGU+MyX1r7GZ z%!P_l>F(n{-~!*1yLqXZN-XDXWKiyAEE?pU0G#H z4rXO9CQ!9FC9*0g9a(>&(r8OWLR3o&`F=xLWVseC$p=4fdV+Z%(ETzyr9|3WQ*iLd z@H9uW@v1t_x`+94Xv>e)N!u4Am42(E9$6F(oO_-9*Dt-4rQpe@6$`LoltOho`S1Dk zWA93E>Ply}?IFq_AWpM2=I8wst)Ht7>F){J zR{P~)suC~I_R1MtnCCiE-ticNhqe+zQJ zOqGwv!K67V93&J>_om zL*~BmC2=!v@ZS}!`T|dzYPS#zDFMtYj%|T^Yu+Qb-XTOJFg@c=2sh(EB`RD#7G#%26i=Hzz4?a^PsqZVR6i%OpX(Fr@XQE#$kP*+^Y_k1!wCVB z%@oWr;cNaKEGjWRu*fp=44U)J&nD}jXF|{FJ2MQKkU(xZ6715SXAzc5m%xd=hF2~Z z>YmV9bA7xkQeP*qQxY>RH=i<5t3gus;d}#tHG3EmRy*w;43u~^A{(BD3^5ClUr#pa zt9=Cvu>(}R1KIpHENl^vK9M8Q14>c_M_qIyQy66VpZCKr@}(Oq>#dtMuP6M(538N zu+B%S2`)oovI^`+Q3vke#&T22BO!Bf=?727Q~`R7T68vYefWpm5Pzj$Hg50-UL+89kbG$niBVx& z?+p;d$gwWbH)Hh}OtUA3MaDu4+y`ZwC*AHepWS*%ZfzD>G+jOymibgn#dbLKOllV- zswBB39OC`P?`WqSX>3n+z1sO)aE;q(`S(;M4X%nj*k$bM91>rhx!cbhtI2s=`5~Gv z!tIOu?e4QR)2(>Um&lWYlO3Tw;Kvjy9|VHCQ(;iT1K!G%*^}mbPG4z-^J;QXqQ~B= z9>jRhHJ)v+8R&Bh_X{&7q#1kiSDBNHt8#D- zRCLifiW9Q@NM%^_m0ZzpFfVP8sIUN|5`#qu*HPk7gZyu_P>{fGb7>50+}O3o&foJ4 z%;9D*nHF(om^Nag$oGAk`P&c0{fo0H0wQ17ACNaTxMNwdZZ{JBsJ+zNF)ZX=8=-3q+i)bh7Lu%h~KCkwdkVOb9L#f*vEU+h6LI z*5D#~R9afUhaNT;L$ueRhdRGMBS3kGiGHp7HUh6u;qZzW@A9&-6SDW5ULo35ggwM+ zQQjYQ1I5<@`~kG54=+~bf0v-NtvE22XuQ6-^I{ZH+~ED)7-0Ez_?55mAE{m3;h}{# z0j8$LWrCg zRzg8~{6Wu|6CL4%*V5E1CIUdb>-c!T=Ta1F%z;igK?p-`%qVUOLaj{VK#>xT@i{q<+5Ni&H$I6vp6oCR#&3={`vQbR zFr{q9<+HxOTj-Tcv$8Ks2C)eUc7>>EGdt*3wyE*3IN9ls9Pi1+lqACL?G)BFI~o0w zlzypcm0?-qRH%h+Hl3JW@IR(1@oD0a!!4WYieEiKxD{br1o1( zp$q9|i22E<6}$xFQ8W$mySr2A=Sm#i={TS2aUUzimMDn3#F}5uym~LaIVw9iR5*ON z8}1E|CU1G)oQ5I!YXy$f0zfCWfpqq7wjZx&9?30mz;P>@L0ZAqW6!9QU^T_@mbYAmFQM3lb zF%e<``T{i;S31piiqgZ>V*jAg#AyChH%NXB)<~%L$nXoy3CIzUC3$*iPqi(1#LyhoTF4V#g&%J# zt7r@wmvz{Ts8o9dZ#`F>aIaTJvFS|L2(C&NPvuU_I6CJN-0a=^BNZrfmP^e(Q`jqv zh@eT&ew`+Sn?f<63w*P6bUjUV&&kEMjTnzcNA0TDf5 zVm7`~nDjsKRULIr#G2kcJC{{~>wZnl3(QP(u#i#ROiU7%@nBwm3Qc`x-* z!o18lLUiZHi<`NRO7M2Z?IM~g%H5?WRiaH;P=tn8cAv%E89@bg7><0-`+RqU8kgRZA`A zo}+kpY>777xKrF#Hyg!i&9$hXEuV~fkFlujGC}4pun_SKjhly?#F|r_A-JRUUciC&}Pg#^KW z3z|T%a$F_ft0!f37@+;hz+i+Cj3JTU2T~>V>IkLHp>njxftBRh&AIuf=mee$BnN8< zX2mcc&648~=q7XzC^9Fd7~EtuB^H+UCcS!1;S7qCiyHEPhqKX7<)Ip;%R^bVJvyap zi2SN}js$iXSeo2D+`F8dZ7G4Q1Cl^MjjZ2ec>$m3zROVDKdvSq*Ben>F6z0wKUCH?XA!lfogB|{hh-KFyGyWy^3 zk!4BAP;6a`Ay6h{bss)*KZg*tR&<|@oO&_JdI<==0UoHz?Yrko*MCGC(xI?T9zLkC z-O>n^J#w@Nj$`2z6v8YSCZg0IZMV#+xhdHYqZYI-28=@BR&MH zgYN@wq)b*SNnZHYD@~oP?`Kn~x@l9>37`TE*wy&l~T=*zTJe$nQ_!-t4px*J(ufNJq?D514b*TRZmZTTP&9Uz(!DjNnn&Nt= zs2lhvlV*L5K1byz6!}^~@JM`w5c?M}4<|>puHH<-Pyzre`Rp~*DX}n6X6pQ#$kw-d zGhtQN`|G9bS+LhfQ32`Pe}34yuLV0X%FdQ7&%(~rN&deM51!%KZaK3voeKj$Ig0Eu z+9Ot;S_4b^aO~myKp+&e|06P=oHo(#C+DX2Gv}Nn{8!`eyxEC+ie){7lbizzVho;I zq_#==&`sunkM5X*+}w|^WV;$FeZTP< z$T_grx@`UoL1rq}B^WC-$}Xk4vdm*8@(k%bFupaTPG|;cr~W2Nbd+V)9UC9M+UADD zGAvT~2u|&cibfzFY2o_mH?muE36aCnrNoI6b`Kp(&53?i;Xj$}CaT=REKWw~?Dt0> zce?^-K8oNJV}r`hCi5CJ75={AD!jeL#ZZ-{i+0SwpNsiU%vO^xOyln19IF8m*}(6egr9+#pa7<-f?FCosU*D}$Jn zwFS0p7;AM?jtINawQVnHC9!lP-QKX}s*~$LZcpuwcX6#<4!p4%4H!!Y{w%!$0J`H95#nuxtL|S}b z;`TZtB-Rv40k(wa@G|EmtdpycS{eN&TOr(gzA2O5*d5Ddqu-_aYyqx&z!!#6?)pnm zxck+)JS%VX8{qa>#{Ao{^YV<&5w8jVOEM419@YEN3TXnPU>F=pe?J$YLK4e=10Ee$ zUiCA3k;~|Nw;X2aId&{9o8X;Q_Jh@w+z1GE-->kBc*t*vJJ`#voW!6@1u1a)GVt@z5rz#0@hg2r~01T$MB^hb#Zbu0v zC02h=sD>H_c(06&lj4jws%wWh4~8ql?HEZ|sqsIm2946&rax-rcLY&+s;WmusAi?^ zMmfS9xpVHhE(S?mij?X4%M-zI@ zuR&t*DB#!inVX2(CHbXPvH>@two7{?OjhKe1xB#_}N$zT)r7 ztRj%wJ45Qubj45mQnW=h8a~xTH@eW!dq`kX1fqSeL+m5(6Ur?h0a9h;op0}7cA+Ea zHjZ}uT!BIa$Q+LsKiNk6$W5LdSMv3h6UaP_xFSp+R; ztheEgn=ZxHlMIoz_+bK4$Zt-#X_|{rO?N&oJ6lHt8p8lqNlI42TBq>@fcMZ^+E 
zvC`=jlmGEquVKO{L}ZO&`(Y1w0?|Hp&~^;DZ!Orqw>~{wr-##UB8Mvay!bfib%^CR zLlF!K6uoi|S6W!mJQ7we+}B239SKlkTAqf;+~OA6b+WR2J`eJkJJqTp&c|xYqRW>& zA9Q4%t5lrlWK)z)$8jk$eclo)vtk`i9A|Z*vKLfZZPaf|maH6au_5TaHSpXuT*T1R zU6Y-G-Is=YT7Lte7{^s<3ex!F=ww;07xJm43@Vh6oNA|AHK}V8%vMNApCwsgN~hU3 zpkub^t$v7=ydCm(xy6;0oG?u- zruo$6J?Cg*e~r`kfkO4*L|j_Nwgeol!4S3SRC4*q^sX9wcR?{9HLP?<1TsU?Yf$C^ z*Pk-%%`D~IMQ%+0GTE_Be z|I0k1jc@TLJRrkBUSk2i#rmj{d0)I^qQF}43V7fXiTeoIEPRJNoOOzr@O4PfsIdd- z8z4;n_QVg}t{|(sZ7|K7gfgWbhR%K<%q|abP?It$8vR+CE$YBkLSy?wip2_r{9g~y zUXFlu$z0BoHfu`@28t;a!hy;O-_H`y>vsHCXxJEb@ZblCp>L-guT0rFGZ4cxCRBFp z;dB;Q_QFt{gQlQm;b8}k{xpi~-}lp+3BFkzPYA{q&vh>)m|7( z(LCFHEE~IINSHm*y)WyJ(Iq^kI-bb8_?=$lBjtESM1{tVaqb*VEG|rxsT*fyT&lj8 z8}a?|yG*^!;(idXLlt=#3NrMi$lzhCn@B*Sjyw0amlTlLpAfJ-wJWBY(Ov5$H0fi= z-o3<~j4H9<8OoavD4SGMN!qld5zk?mi_RCx(?!i4%09cfFTW?sY4mWd(^=J%MC=*a zGC$RWt6Gn}VBR1{+`<)t*(sD+E<%kmmxUF!jXA!lA(``u%a29!Dbqbvky)fwoB@ju zH6)tDRt@LNr-Fx_9H|o&v*CKP)iZp=9@7g*{S)P6Q_n|0(FT;LLkN-jGvZN5@~ecK zsJBLKx9dSN2NI0JQ+L#UkAoevLo4QY29b7E{=J1`DzRUh#~?p!j_%R7-vER<88E_j zZneK0ztj$0Q8fB;SbA}OWD-Ug<)`@~5JjsnwJ6ORShxSjo|J4ZOJ?!r>6Xevg)+y} z-{XbdKx#c%reKe0_r@&P#7>I6eXGA1k8lG%zSqFB=fojw^D&jt-Nw#w4PL6Hc~PHe50y(0vghEb*y;(g zJq$6fAqYQbiuurVV!_eFomD%@SBM|OA&UVr>`4qkjFVSo9|(5I^w%Pz_X(1y>uh;I zG5v>K%`-)jdX%X8JlEZu zH)g)+_Sol{5IF-WH@+mqE{v+cdQz!yx;VV)+S0e}cjXJ8t2PTZNaj zQszmNgRUHKbzL05%Dp0?8`PpP-NY5)_s!0Y5RNv+1mqQWS(Bv-F9C}8{Yw%Hd7S^V ziP8vZc2}}luDf$2=-Q%9c)ry6w*DgyA%f0HDMsziU~6I~HC_?bm5hK12bmtAB!<&1 zt#3*9pDiI5Fr7(<15ozmXvoO@z+KP?8)*vfr95N4@NQ%5EMpgAMb?(4gR%NpQQ^g4 z-rAP&DU>bMjT`W;&MrJ&4MLiL7Odj-{Y<2r399@71;8m<&+9= z59XJis>G3+r8w^E(lUyV$2tY znJQm`lL;92%Cn)0R!0bcL7gS^Qi$EFN{oOwfe7x^EHheZNQE%G>~g#-i$8_qmBN69d?k%&80LL$RLQlCD06KLFrbCzxthMpWfKe8U@H+DVcpG@wQFma3P zH$WayMRr|jTm5kEOr|of)>$%a=jC|8_nlrdM4Xe;_bdxaN^qo z%p~Lp00unL`t=AoGOC#{qbeoX+R?KR5NeJu^b?BZ23^qX}$viR^9lsDng~+(d;8*)X0V+L8yg#tc}x&DOZ@dzYH2 z_n3uZd&Zp+dw&rgW>)#6IEwiRR^ug4#6*t23H*zaS{}{g5K`szSxzq}r~==_?)37dz6Z@SMeX~e z;Z6sQx#mSCm!(7opDv57C4lK!ok*hL8Q_JpJs{>`{MP5A|U38t2J5n6Y)J^Sgg zp1oVd-h3RBcogKrh3~-bM;AtbC9b@JzVyw-LMQtGXvIDC2mo=%Qgnd60jM;hEs|d1 z1~8%M>yF*K)rgQV`Nls6m{~-jS#AVWx;pV}bGsnl(4Q)TVS1L%w1G6_6$JCYye=0v zD$|B$TIH}6tz)nz%yQ_Wg~|iP4}i@A5TYNP73^#g`|Xj&P{Zwj*PkIC^~G~P;uz8sKK81kV*nkP2D(0tfDf+(};23;Cn=gbuU4dwp>1mDC1E{JkVy(pL5o+&huc? 
[base85-encoded GIT binary patch data for examples/xgenmm/imgs/image-1d100e9-1.jpg omitted]

literal 0
HcmV?d00001

diff --git a/examples/xgenmm/imgs/image-1d100e9.jpg b/examples/xgenmm/imgs/image-1d100e9.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5c532ace08cf9c2c0a85da0efdf159ea17434e07
GIT binary patch
literal 64176
zWmXGR;Zy@Tw0Q3myqoqIm~U-D-7;$X>AR zdJUlO*^b0N###02A@Xb*9$O5~F#!^6A-n_Eg8;oq=vqdo2pEVuG@Zpbe&K2M(TB>p z@}bW-2@pQdZoZ}Emt*|-UAVqewIo^Ul)I-O^`EKHq5y{_C27oBY5C51?eZ`T}T&V5?idY5J4#PPYaNr7jUT-4AN zm~f&jzfjBQemu=A>73J%Y=|3A+zleAV~_-E)5AG#n)>Ef^;B;Ss4Ea5Ii%)}q3cD?a zFUI3PVSi6AOgQ4U&+NR}48922Lye}V7b%*K1yu(Gi0jwFx5U>Ox=f+c+2$>1?tw2S zu#F6>Xu~_*oacZfklUvZ;U3h&-CHY{y()_?BB}R6y}0b6hA%b3xt`ea#yz)?X3xI7 z&-OD2g??3yf~~ZIzwKUtZ`8=a)||fIHXZaxH2t-(WNENq2E(RiQj2Snk~p5uAH7x9 z4k+qRJN1VaKTFXbrZZhE4;U=Kk_tc~>16}Lo+9!^9gfHqp~F3JXuU& z|Fl2}ae?k)$^H*X#wH7`+xb{`r?qr|>Eq+2nQt3F9rkoW^uwI2=V2oJ zF~;`v0-Cui*V$PJnRkwu@GJmM;qb?pfGH*2wZJ}HIZp&;I=Cny0z%VA(nqq`ZrXC3 z=LNDzd=nl>7a?fV4AnG)a6AyqZpbQ=VdIp;m~x;}CZMFl@p?!LE&yb;tp=GE{W7%a z+@&%dh8oI4I!r{2#3p+0t_&#nD4dl-~%~+S2v3fo~NfdxBgB(a8kC^3iZs*t@hF* zHx)TVD35fD0#R0iaMRgQ*A0!LnHOvwhy}dqf zJz4K3*1r4G&PtVlU0;54t2Z<=rcxC!*M`2Hxpb!Ddb8T-Qhm#@Z;P3ENO!tCp4*lyM-2-|rTbv}FE_ri5& z_(E{NhR^JSeP8j(`N0$3`Ei2p&cBkC#0Sa`?)eu9?{HE0ouoCo`}vi(0qe%~6W4ZJ zg#w<%N%rQH^nl7L1GFdCtwYH_N?iyDXlgEwda{6CqOb_piAM!7lxa-rKhhi*R#zz< zU`)?R20Ck*nn|dj{Ji13?r|S9XLE_erq(FmyS-` zZKn)gmkS}K(&<^wz<^F6$>P8W!C-p3jDDX!bH9L2MTZOq9^=}F70}#Jv{<2RkJdGW zqJz@1qco_mljUl`3axo{#LTeBn07%r@>2DTcUeo{t z4rC_zTYAVS46rQ*2R*hbT8Fpvs9WQNLANywbmv_*@1fBhWUyMh-P-LR?RF~wOi996 zDJJy*gaZY#M-yat?BRSczdt%6b?%69cCq(^%>n~iIl>fbgK_>$CnSuU`Et;lp4yuv zSPwkldCSuEEGk&G8M~B5vAtyOXA`kkQHLaMW5Bw4V^SyLYS#@!!LxPWX;tI8taihE z*U#K~)Cn<^DDFfpt-mVgP7=DJ>;SXWG(~sxEAI=>2^7!a;ye~xz;Bh}jpH6|si{}& z)LfhSCh#x?KBP* z)Tcf_a^uA#>`OTAmO?72MW4`Nso<#^R)0L@`>u%4N0PR1?wueQ8k}aEdph_CxBSU( z3-ew(bS5?C_wk++lXCYBKdP6VYyqX&X(TPTVwSdiv_RxHhD;z?s-&qRyA6lsS}T`@ zyP;STFe_Y>>z7^>@EU!(kTV@5q3fd=CQ_ufgYqZQN@eO0Y3FJ3EmMg zHqA$riGTzA8j6++`^eL4Ha7VsrxbWhLc4UF?@%wp(@F4R|9}CCdz30`a7l^-3aiTS z9#XJS-~t#JoQtsR2T`P8mLrSnFfed(38W@i<3y|F?b=sdd$i?pQ63<&`6Rm`UI-Rx zR69!HDo(TzjzItyXah{sucQlK9U;lm!PpP7D@}vOkmMr5TiT^RqEM&#Ou)rfK45@5 zPYQ4`eItbc?HUpFv6YGquPr=UE?Nw~lK&PNm{0;d#w|J}nUa7q1Yk~t(@8y)3R~qxc-*(Y1cdV7EiH2`Z4q()0WQL{#7>{t_ZS%#tGL)&-I!hB@Afg ziQ+b2!~5ArE_U{{EmM-z&wcjh#U(XH^FKHM>I12(muzOfrqofTFaEB|B=4>osevQknvqrB#M(yAR_xt_tvyWf4_+oKdU-sEaU;5)(_H) ze!cRZ2!@3mRnyitG^w;oUM?PNm*mEQ7UF4JE6^$!GQmoMs(-|k!m%w}pW)A{^QM)-(9>yMX+ zGUUn-A*u`&M#L&A5`NS?-_p2pe3s|gqVj}7z%WP}bt}DwmT9vC=xf_x7;K=D54(!M|TO$O?{uS#2hPZr4^bPSzRv zi6wGGT~p3(I?Dt?<9#+e2C~Mq_>(mGb2?GZQm<}Xp zQq;U#C;t5)!>DU?sj>p7po|nFr!O|CdO9M7kZGo=BY=n#wd(@fSXx*?d?TJ6!FXKN zrxC6pZR}TsxeEm~AdmuNCuw;{akVR#p{2I#;^wK(^z{pf!c{BJ9-Rx=K}mtqWAq|P z9#__=@ex3HG66Y;8k)u@PvTmIGU=b4tZ|(+(#*@X1COvgKu*X~mNc)zR`7}q!H*F@ zbGfLcT~B)Oh|+XYTNz>-4$z3u`m#*bh`^$x<2`BkN%pqi3%Pi-VtBj8$5|;%A(ZBh zc|g2g=JZ?DD{pbKnY;7rc-pJlBZ->6!Hw)g8Wd~9c`ebY#dT)Gl@sNN%?#T|W7rKK zY_*mewj6@KH)UP~LbmqO`;z9%6mKolU)^f6BFr)r;u;gF7qRMC- z789uoLaJlg(FE-IExQNgZ?tSIIm>(AfkNwS^2?~3n*}&pGvtmKJ%`bp%Itnf>t9=Q zS*9#FnL+X=cnA~VGbqZgt7QSyYhuZK8gy&Gf~ZHjC3Z!r647e^Y-5>egPwI=)JJwG zCTUt|Eqd+y@$iSS|l2DnX%sd5H-V85hEU??5NP;uwEEJY~6y<^77S#n`PYlssO|C zC{q%;xW^8)I2*`&n9)jDAOLFArd$)3TP#-tT6a`8y#@Iz^2~{MZ$0D8zm#4;mK-B( zu|Khy+cL#XbC%9oHxBLgn>1-{6Bf3x)H*EUH7o}VNgO8wgY6w#ym8pDdnV?KYX!kg zl(zjM)Li78ItIe+Yu49@9Dh*Pn```ac(LIs=UYbJGV8Wh-XFkbwc=deML+gM!xuzTTy&r#?ta?xL->L;Mar1tK+66E~5X-|<(aFWG*#C^4 zl0PJk=|LzPlOoz5d}{^N09&JtO+Bn81H5f-$SGW6(PR2pn%9xfhcEY0$zu(|aKL`)B zf0|U${@=@IXCSteNvHs|hc3LyxR=qcqGwM;v#S08+v=*0)MTkESarO2qFFBmoG-)d zKPWnjCK@IhCruU4`9DMTeww>|(0<2`?)bkpW73>y(XPSRT_Ud&+B;h=_I(*sxT{%~ zNoU6P_G!QAHf=!%p7~n5-0pnLK=r0(91nJiKEYD7)4CS-%tCW>WbVm(p`9oA6A0{G zi~Hy(r6Z=TVDM_S4r?FQH!DOfs|zS1wf?gF*Yg9THJGkBlir-6KhDkk101Z(^Ju8O z$dT2)8gJdQrM`c;;Pjbl0~PUwQZr>LcrR7X{N2#^>Ock1F@-%65Q~w7QX3s#sxW2` 
zed^+vRr{XZ+!DLk7ck6igYW>6>m6fI%jU66vB!8Fo$|2sQ=qZqrfg2FPcZ?5`fX;q zK0H#M|G~#eUO5cQ>E!n>EhzPSn+Ri7D5jfq7%(KJTHHr{8!0*OZ=CU(!vQc~Lqg8{ zvi0xqFS@upr05wJaR+J-?**X)0XqmI>6@+L*%FGaWgxg;hCZrq6(^-BBS%dS88)0C zSgFD#QHr)M=jBES*f5A%dVib-sO#5_lgA_bI)0o(oiuZZgxT)u9X&9_s4ce7H*fBmc|4#GB! z9vzt~)p!Xd2Cz_3fPQ7<(4EJ72B9=XDk|i=ZI{)EgT!$2b9p!*ATlx2+&5ff!PsFfRtXqB?34Iky zhSSPJwoPXR9Q-s?RZ*K>)LaG~VbRHkZeV0t!I9xhsh13hBH=PAuP`FK6Yn7#ueEoE zCbFNKtOxFeB& z|IHQ0m?UI5zTv`9lN?;6-mVv6H_cHis$3)+P}K1u!H>aG3PsWSqZkjIL6ovs|A8qv z1DKFYmQsnkT%a!IWRg&ZX(p`zj!Frm5Kqzt45AFLMnW@zfPoSI2`9eno_=lvN3Vq`|#!@ul3(7Y;HiKbu#pFS{_#EeKLvNL!A4`Mg7C zf*g>U9n)pAk#_ZAL&JrK%6{K&7E%5H^0T*h9`W6QQEij(Y|*Cv_KMWm-G;bbW=7)! zRyF2?lM{+2)+S1)&RK5`qL{m5uUtI&7>SbolJL1z<6LChcQu|krWZ87<{dc|QqR{9 zf>NK4FV~96;-y9Jm^fUM?l&UyQqecFBh>gAIPuNE)1G@Ky$tMXk3;Q&O-`zC~C^g!;i${@YRz5xTG^477 zN%*UWaqHsPUqnLhIr!9XMT=L@KF^lbKe)MbMBb@8X+`Ve4$o50TVcVydflvjCc*SVIgzyu9=yXS=q*QOI442V9RfQ%f>Iuljwb->5RS@tRq z=23}6wnCTM!*qjhTz}x7(z4@4UEYj#2I!KVhG~(lmdJMpBg$KmVbUdurmUcbPQe3C zbwOfQ!0bYsVwW^h3#XhXO!H$~u4s9VKi=W1dhLHU^kFn4sNhv>k)++qmwMWQnu-i>5*(I0|h(q2+!GCey|e#f?rCn>;adwpMU z;81kDRqP*tQ1Rk^(A|rG3eVkv#eTe=%A|voIc>pM4gpgwrOnn0PWfajCVEg5EY~xi zuX!HbJC+YK$kJuYB}nT;+Zq2ial)YYUAUyo1*5GxVzHehn26@DR!`G8;~-)eniDg4 zSK#GqfqO-dD7&LrTauNggl3xi<@{9x8*8cmsXo8*bK^{XZEeXwD$NX5OL1&3x}{f9 zd)d#t`gpQ~jpqQaK@Ob(&8U$^sIP zf5ypCrp^=T1xRJ3$FB6Koc`QsdcTx%x7*Kiyy7Y*fvfeJd)z&euaG1_6!1x@Hp$0+ z=l<>*N7?hGlZPAIE{+qe>-x{MXuirferi#L)W_ZhGb~ezYt)=QUH$+ni)`ohjco7B zF`R_?H1`$B#+Fgo*!P>hC(}(l0`9bmH>YQQPpr~DQ)G`(SWw(_zuU}$1FMGV(QdWe z_(qPhSo)n(Bl|(+Mm7;8+tRr>a^HC>t9G^j3JDC#Gi(lWI=z2sVx}_KM^yOoI!(%w z_C`6OetdG@5|Op9Pla~2CCU;|vMt7wYAM>e3ga$!T0TaBFr&LEJ9-oMeBL6oZ&9eP zuWSAHxq?}OcwIB%)BAM7wNzslNi0$wy>a-44mSMlUvv_2(-=Ei2_u-f??B`1i>_}D z4ct@4k)MB2OsHXqNHYb+V590VKw%HvPBMk|cj&cuL!y zZC>HBueN=%zk>#xKUpgNQh1fP)oVKz-aITrHD$yI_$^-|M|q%wAS}xS352i!=u_hQ zVBEwTd3U#1xvbM19_qH`mDaBLUwt3!dl`2&HD>^*m?1DDrPn~C_k&^vU(v;&n*(a; zJ3lc5^WKC*YhFhLBj;9k8+no6oG1zl_uFUIW`K@G$D+;$i5wognu3Xzc9zMhtv`O- zRr#7ZKR3KD3sNUq6$dEH-L5gTn5b%^c$XaO9hsWqkT4UP27V2bKlP}zQw1@4_0A*ob?>W>IL~F{SXJO!s(X{wIbwMN|Aj9EZk~pSwN?Vx+ScgU$OWjvfosh( zgpZcA$Qf$y%SO1@g}FHRb_zsc{oj;p5eNTnu#68WQ_2fo-EtM7(yk3_^_FB~xkc2N z(c;tY{zXI&K?r!EaxLV?Y`wtC>9Aq#u9#Z(zHD%roL4`Servl?&V4>Q4gOp)^|8`u z&B=Ddm)H9xv6v>COUpc!gSZvMX6%xG$JKsT%~1dQxGRz`WGVQU;+liiVPBjz>3h5JITg@ z49MYq_+|tkZ%TGl7b*ucO_KTz%t_B!%jAUeuO#UC2tBS&#zPJwe)R{UoB_uIL~K!Z zYSlY+5mNuf5d^5iU0r{)#ydzN77Y7{Z+~^=!Qa52W!fDy6OXAInv1Qe)|YAL<_Dca z^`ij6R2b&h%8kKGt=fKf{_WvVt)Zu~U%|>VcQzi-r;8AE^4kTHxz!%Cz0dCW8cu~3 zU5M+5@IyEr<;cP2Q0LD5Q-!(zDhkN#W&wyR-oG^|0bD)(PRVVxx{%Q9% zf%1VUuREF9dr!R~N|a9gEGJ~`n{SW}Zc-bbL9yFbw**+ZqC+o~WlKv=W$YY5Yh1$zdVn3%!m z1Sxj=mt9{vUweU)0a{TZ>BSBd6nyLF)I%>koFys~=VGLiuX9h=W3}X8*I+>&&iO9& z^>@$`K?vh~*&w$xq7r0($?JPvz!YmHeRP!FcrYXjm<-3s&0&EkqK35@+3@o%(dQAp zs9qot4oNNkyH`}%{7Cn&@Hfxjp1q=@VzY)Uf?2WiF#R!H-(!4Jw?=POBIG*^Nzku6 zn=BtvQkrh*KoNY8N$u*Kg8#aKe2qLR#wyn|pdVIshXT4SOqV7kzb;2 z70S7CU}%2l@$h|^8@Xu z(#ube;v5ksPy; z>`S}$(_-E`qT;?qljNYX1;_4V#H*{7M_|{FO$F;Kn0IqWzkfYMl7s}0%}R6CU%^8c zhF`UHM9^Ey_2sfws*P{W%vNX7J@-yl|0RM!JtAT#1rS8=Bx|5(7|7C8@(&o)Yydo& z7E(pSWq(jEK7Tq-77>Ox#*x(s@2k-Tkcd1)Gd>G=L}LYA@4?NJw!(>c)o`p}px7TU zcakb};HX)aDo(gfPFs#e8w_idQ9vz8xnt&jPLc<>j>1kF5Dd;d2?srO@rRc(6b+(G z-SEwL;PJ_72{*Q7kFm|=_h?*xY4_3Ec3Z@r zPkMs~LJ!;j0eUQTPQO(wPf_^;fc(^6u3-YLTlyBx+5SGWqrHn}FOvkylo(z&*})OU zV~dnVM;;tSWbS$F`?mH#1a;bPg+R0l`vbhn9K50%oAJ=Zy8{u%vz4cF>eFO+&j}q- z$;#089_G>V)z}b@b4Rk2bIT~}6nz)%u6bApLeiq*#CYA4G60=UlZm= z@`^3*ioq&8suYhPQ7J_c=Z_jt+!dNZcG~n-$c#pNIQNCY-rK9ox@1qdH?8DSf7>^h zSYQ8kV2xFDK@nk$}jSu*M}EBQ{gj4D5GlUI^<^^j5# 
z?v?Aws9UM&pzI)O6mh}&cf^lw-TR^P`MoO1XnSiVc93Fi`|>dy8Uy$!?qLx030B1p%qt>0L)py? z>SWaGId;=3;>DpmBTLVwki((g_t)-wZ8SO#g?W21?{vBe8FN60jIC+A%I63$M;${ zetKL|Zg3nD!f>J`&Ar>*SUH)<)QL79a9`Otb5ZRI)#DP1oh&`9Z#PwJkk_6Y(gm!! zs-9rNTaAXu=!Z4z>w?+bM&G;5f0>397bYAn9$mVrkF51Ahzk>KEpXd1{?+^DU}?%z z+HDtS9>dl^^B>_Z@ zPWR`_H&{O7hW0#}I(^pMlN1}H3f%V_ZugvZ-FM`BNEE;ii;_X_tirxk*@R`C4zNAK zNLu=5$R|&X+|NwENQy2S=A{@X4;}d$vdPF)cRWy*1jTB;3mB>Po5>oQ$bPm@ITs?? z%m~TWe`32$br!Sz7EL(;(Az#jD3AUOF zIZrxgIQS)|`($L!Me(r+-Dgv;B)ZhLET<)6 zq36w{|-7{h}-SA4Dc2l$S{$&&?r`PTFe?Qum$QMwE1s@*_qc*bp~XXu zuFHm7U!Uk+cENJ>O8J_`b^%E#?&b9uEgnI0HAyg%=86pK~ zP>jt)P!YyC&qC}+d3iJyjf#c*9%4iu8l1i3wkJ0^rFECll?-3m?a&W_N>Y+0IYjAM zrP*B*M-mvER`~&J1>Q*Z@s=&8$OGP|L&3Eqef6Im5&u9=K{Q@uVFhW4k}wVWc%vgf z-eS1GH)K%GEX~;?51szG!`A_;pL?4$I7WkDX^*NPr1Sjg!!YDM~NdZKm4u zfDEQ1fhg+~n}`#%VbWe!*Q2f0bnZ}rk{FHw)g#i9ek*Ba8}00ng=^oTa|Ir{6E!?P z89PG9Ac3%v)g6AEVK!A*&W6u0)u|wwB1vo&jJU$$@|;hoq<^$NU$$DQlD(azE}g=H zpCVL!O?Q5KS;KKguV|PK)kbL4e^3Z~qBy}baXLHPR^MCBD~rJLN0+K9qKeOCaM_5N zI19mgl9zUQEvZ>`Cb=SMN}~49(5%;^#6%}ZhzUgVTyG+HwVcv+%7?G$lWJ`@cQ1KH z;H|XtcUib@r)(A{v9&LXzUX*eyG!>tU(Q>=fdxfJQ3-QJ?5j2_K@wxlM%TXJ@bLfsBcnCscGvnTdlD(DKDwub?%|B-d#h?i%(Yf zKQ@A#shjZpXHaT8*%^VG7=9-W!$;uGygM1j9;zKg1scVpeOLZfZJ4OiB*bS)IDx zwW-%#9ji*R`A6#_=`3G`eVT9BiG!nK_p3yA^=mx23^zu9v!=oyV6O&bWbqD6FW9HOo0lb1wYbS)Sgx;^#@I;MCzj1#kKb zEkSAEkSy^XkFY?smZKUxBl{hEA6R!80Etn->Q+C*yGz|2a@-C!tb$ZimCa;6xuc_q zokVrQtZ2{D=E2Jc_n&Y8i;X&zMhhw}*LXP*Naf~k1{_UizrBpVrb9VBd-g_5gBd?Z ztlqbJ-nDxqmXy0h?+r|C!D>~tRCJ=MEu9{i zaHQSqqzV(v?!{K2&JZE&$qM9T)e@41M!dN#iYC@w@y%igHzSt!-U-!f;Y5NB#rik7-GqhAa z^-lY{MlhVS#2{g(SYcyRGmDf@{wKS3HSv==cP^=%Y-1>gW^_1dSUc&7GH!N0FmITP z2m*mwQ|z~-Uv8LZT^LiWDH1s~S55r&PeuE+@k3E-9m*-{7%VDYyy3IsZ?BLyaX~H@ zE|V^B&VNHb`|jLM<=cljAa1vI-&A0+$5sJ$BfeWNd|yDvlw4F4*)%P&H>UHw{UMX= zgZ0?W&Gpcp6-+;OZTMcn@Q6vo{GF9JQv-W2G!Txe!aPa(4xRVi@$JfYZD<@tAB*C) zuFneZb4#+IKB0l}fpfr*zN4ext1)j8yqy^EwHDR37;a`oihs_vsUImiFON5;>PAuF zosJzQ)?FKo+KepP?X|a96oWA4~rT zsy;SAxp~bYuQj~eQqcj3q7UIe89*2S_%=0t-OJYibk-F27kXp4e4Nm?w&+{firyn97wrsy)>kXY+kA{M z>R9ENG^bHb!9aAtsRPH>Jnm{V+s1~mP?FVYVcuz+I{9GJ_6#VLN1x|$ps0V9RpRN}fX_W*IhO*bgQE=Q(EwR8{FSxi$TuITf< zL$X@RTd}G3Q$n=bD3?QpYjxjp(Fv?O$q|yqz31p+2)AgZexI;9uT&D%w%i9%5+}vq z@JCWV&kt3JKe(f1qhG#g5=b&>?x!v!FT1m&FZ!h+R*+rT=%gsAEONE$3OLLDA4`%4 z$ZC1CmOxym8MErQ9D}FHGt7DJ`k4D@tqwlr6QS#-UU-SGY$|XE31ysne9b?NCZfX7 zjHpDY)wUj*xIFDR`$?P(F8DzhE^WmH7BmfyW6%neoQeE=0! z)#)&K>$XeRP&qnpEWJN{vGeZ_YrZ#%jMy$cWuq6GOY^?qdQb(HG@SWD_%P>X1Z(eQ6E{_XCS|8Hqr#Y+tdg}kN zvFnEeid39`-$bL6WNWqI)7{&iij?glK$bZ0*55{v$6W<^S7Nn~1vx`h%ZU1b+C=dS zk!NBE1<3dkBs2idw#9c&yMOTNGs~c~PR#((6oS@8Um54Id;Luva3ay#eTIA3(}_!? zj%=qJx$0EC05M?J?!=Z~kyqPX^V0>2AO+BoDENBpFT*&QJ)A7auFVb0qgF)|O2fmO zk=_vuj~gJ`8uWG)+4eX2o{M;Sb|Pol7R%`m>@gVxNAcgIMdL%@+8! 
zZf~@D)cbl8Qo|;HW^9=I@LajM4wuhU+{5|_z3s5(m6MPo%QzuRl^7-jN_pJH$-=j{ zKcpvN}Zf{-D#wvIO`yC+@fQr#D1crhbLtP}+T z+eqopTPiB?V7NT|Mb+3PfW!eP{~eH_-m~>sO>%&qB8H`_>8*# z@Dp)l*KVIeP!Jrg!XE2u+Gy0liedXp0ER@u9aCHPmWLQ0hT;4|O$B?B!Nb8Qui(PT zS8w)0cEN~8=FsW?&(SBqkBbAFQdWGw``iRcCsyuctPOqXOENZvy91K=;Fon%x5KKNoVl&$y=Yv z6~~-FS9!=q!=cs|A;GsrAm*Nz7U?a=0?EmA!K=_j)`x>Lq$vsdLxQ-=Isvm>*;0Z8 z{(bGWDYak17p(SaWgBhNKgYG|k<=a@zN9Uc4KbCYw?BA)K{KOmQ?TS+1=c)eB%M(jBMrE~(O zT|}9k_~d6+0fG`xNor}4k33Xl(lEX`_HAiih1g&Fn+NJ zpE~?%{7}l@`6&JsJ2N8*$Gh6l0@RVD3Dqnu1ebqQ&j^w`=rR>n0%e=U{8}I$ZN3&7 z{QcU+Omte}#{_kPmP<%9AS&|>K$(Iq@ACc=0cIB6YVc-Wkw<>cjBX(Bswnn)16Z)J zmNnFYoj30r6o>^r+#!WxY^E~jeIBiHa<5^C$WWqJcY!f|!XJ=X2xmIWh&YvvvA?Ow!;V@hnhA2>=vVrzGMKY$W-&Fs8d)&BsC=d*|B zbM+RwP}RQb5Uo?iBKj+Kk7n<-_y?hn`olX1nQe<|jrznzYCD8Zf)N)TcS=nra?|Nl zsh1Z_x2p`|NS8ArE_gMSOP5zL0|rQN=^Bi1spZRnG|lR_@X%?s8uW0|%Nn@zWv^bY z&9Isy04dmC>_RU`dBDuuwMAdaI1n7~=?I|u6NR3iNie$}oVZjfg8!x1(BMgG6LnY) zFataTRdXLBD;{RdgjvZ#_$%ran1WYx@6H(n9EAb=kHlY3YmUD3yOW_z=t9RWkk9>d z^FA%)%L&zJKee%x3MY`FH?(fve1XLj3=L<~7s@nKvOOBN?6lybZ{>%U((MR|APs1y2 zZO%2SYwcIGL7N0B9Qd47(YZ*9=a+u%O^-4QfflfJvt+rro2PzHX)oRPgaMid=-<}p z+pWvF0A=Qw22i8Jj#hme$|G~+ZT0L;iUH1(aR1Do3!eKJ7C!p z*H6<5AHb$@-YTpw!9!1>gs;9+ioVw08LuHwjm!2*_b9&%O3HX@3h;(Y@{4>%r(mPF zhVml=8+Y00!B+WHDmltb(J3kcKjd3yRr~piqjsM7%dG%%lpl&?F3|F9{T(?R-I3d*jz~e`JZ{^(>i+V8b6n&g8@XBClt68o$Tn0srZ9UjU- z?%7T8O#4@~@+w`8+P1VNq;9|BIq?TbNd9i>ws3JPzvMGXcQyHgR2)7E6q<2!EU251tS^m4~$DTg(4m-*nzb3EysV!X+P1%D? z$ri;WW!;mt)V*NwRB}<0x6$5$i{3d#X&+6f00CnyRaBI5%)1U&N0LK=F60Axk?`E*u@cf*N_f-hIbySV#>!Z$LhXiGl{@7ywXXt2u3 z=!HdY4A#kblLD>&F;#q)38%fwrMh)-TuVm%Z=p80XRJj;tb1bLg zm7ZJcRRrr-ZyPE-b%;m*>{DSMmJyo2)hb&|E5l&ZNAWY&+51NBze@Q#6;+D6l0!C9 zKcU*aNo)eMr8dew&oQm%T|T8k!UT3LuAgt1KZU`_rp7l^wh#P0`ELn4ES8KJNfkM+>?d-8LF1B1HE%V3SlXL5q#ao9wN*TRohI87OXnWXbpQYV_iWC~S#24n%~@THYI7!sP0oj+RC0*UvpIw-W+J&} zW)4X+WO9m-V@C=b$vG6&%CwLU4!P*?`Mt02?e~Y>{NWGWw)gAxd_M1w`!fq)b_nX? ztY@zlT@1pG#cl?G;Dt^_7R~@rYq^0MEtW#9-C~U9y-qcAi6Zb|#V4Mo{_`441wI^Y z1Qz^bul4LacZIX#sX`Jrg|#=nxc9p}R85(s5ZEp<`*p20>w%NU>)o8?XW~l$2Qm`7 zre1IdIv>!sB0mJDp6H^7I@;pe8cC-2|N*>!K? 
z&U+cgN&hC`3sN65zEaJT*YOoKjCH}uUcM;>mcsi6gEh_e`xxQ$q}9(UN9f=qLunwu z{wa$(1qhQ?1j#cKHICXSgEZP%KWu{;}PUyZrDVBT= zgktB3*2;?n{(Eo3FmystD#GZC?5D&Oqw}*BiLCOY2}me5JUpCf8Ma>j=}K!ncs)Xv zG&32(8Yi^j3Gc)eGcTz?%;{U*MxG+Om?T z9jAHqeE{_N&If=aopLh)DGY!4>DP3H!7dnpWdV2?_lkDShpV?Mdpl2`%$0P$#@!J@ ze#Qw;>qg9B!CQjz<|QE?&^wLyyPaMXma$`&OMX6G+p* zinwd-&h&iH8|BKAtyx!KX(Qpe6AWhb?8K`qgp9$oub-T(KIS%+5B3)8WA`)0++OQ6 zHyV_aRN!?IGS=T@Y`#zb94~Xk_y!}P9Al0ulW6-aD|OGHr(ooP8PJCk=XQ5%O8X60 z?r!;Ro^Y>5#6oa2=mdP9-Olq?QSK2tbmO9K{nI*slI|?|clVTZgLE{Agji4OG$Q+t25M( z?jp>gAe^L+HM(KQ7jIwx4*+qB_T6GDi&yz>aTR;wQL$LA^zqZTwawB%p^p^8L;P=j*wvVsV^oHRa57g|OLdDds5r3;w^2ax?6Y{M-m=2m128f{u1iZnM&0ebeeSX3x&5=~9Kjl2-}>dv$fMV;MWAcb zVKEp&w?4NMJu!UtR^xI_(31lmK@kBF{n9nIPeeQKX{eo?(CNnfte8kf)|%R`8+38l zPzf2dm;n7&XU5uzvo|2~(#DsDD=Gb2FOjiUGNdR>sDrMH*ImssZk3e}Y_g;XvAU%7 z-r$cN#rK*%JHOG}jl9KXJ=^q-0%MTAr$Ds^*e(OZTHuRq1`S^9UTnAx9EliGJwO02 zhWEn2+FLSjlAXT}N?daM4raNpI_S}MsFi@EjDZrE89$X~FJ<$otHMGKA|;8a$UXQ1 zs+uLOmSSY~7R3KRp4VGq)ENwvHN^M)^-o7XMvkWZg=8oG-jLPlapBm;DjbDTpiV19 zZ{_qgXcSGx{bP2!|4?vFQ5rj8(Mx2MUxq5*Hs7Crf$X5KnWANzQSavYQG;Tyk6#)I z`!1-(c^Ew>J4Hfh2gdOL+lt#U+|*Fbbw2#Scu)yTsrIOS7S*p0glSKfvq|=WZ_-Tr zyDs2JT|Y$7fBo~S_Z(~XZxTpL(g2R4qkZk;q_TKl85WtWpg^Jiwk>$*Wm$JGURGUY zp^=1r--zdHPgWG~l`!jc2!Tr4q>8cdNIbud>T6wk&U>_tkFE*)A7GukUQNP~3(Z>^ z2x1udlGge~f9f0kq~OBIlM+Q=jwVW>m4^c9=6Ro-u{w_Z8FD>v7q7Sd!#1@R_9jIS zRu0btU}go-D(cr?^1UMyJYx(s;s$adFxCF$srHMBs*U*env+NH%YPzVJ)u`HK~i*PDN5E2S&FlT6yiC>Zn9PfMi_6|}j8gGmS$r#9q<9%3=<=(={vaghA3!@>;~{k~gY8pVJzU8U;z1}3Cc1_h8S=prDccilTO4@=F;l=(Dm zIZngT2&VlAoUuS%%1G+{`_;r7+ zSJL9{<>eK>sJJ z2y98SfPV_R3IJSQ>h%AB^_mlYXR&vuCTcEuyCd_l4B1E|63a6GY?VckY4q8119ryK zKhmZT-w$m)+O}_5MoN$K1PX(GqIi*s{5e0ufhgpKeJ0^`5>HI|2-}j*uUQ&xSE^6E zJ)d&?R7^xP*4J}afW3=LP%3hGg8MSuz0Y(BVs@lgVX(CXHg*y4hnG$csK_&more_X;xIh#3A4p zOw7R{xE3d%a;heuR}oX(zCPPJ-;{j_>q0IMf&vgMM_f)2&oGT1j`bU)#Vm1Ya2jVF zQ{|rQfAj_B;G^K``69ryZ2fldbB$T3vX-Cn6(}eupAYyBRTzP=Q7zfUAatF=MDd-45)NC@KIhqU27^=?eIydfyjm6gZt^zM z%V>v}0mKOKq;?JAyc!he9SZD;xOM8c(MxoK=y5>a!Eq5I<_pEcUgC zCh|^v+uAug@Z7|qogfd+O5kFEWqT!AJr-54E%hF{5tG~J?A^+7S7H=U9N@_z3dG?L z#!h-6x`9xp11hZeW3f%5!s5akO+A)|y8Y_ z8-_8&*yvwRjLOk^A>QU>d+aw^1m-*bx6$VnsK$A|NnrGB41fMtouyxa6Cjcxd--uF zDXN=tOH6T2&@IE+UcyD;S$?dWs!JkCfP-?v8JuM z$C>k`4>(70ShPej6np&6`5bwbQ|NN(p~(Bri}r>C3g)@9k%)4({p~pUjz*KBu0Hdb zMLWyThpd90FwxU5D4N5NYZbT#*L=e6#zNtR`5~*+ubk-D*{$+ay_?far)#h6 zm#>jD98q*DVSG1V>=CeA|An5A;5%o%Mi{nENw)e*DJs`4WlYS_I@4NGPT2 znh#Kw1Tb|#>1aFtE3wNJ8RRJ=Y=Zq%_!X8FdSPgO-7cyF-m`g<*e9hHt7(UNiHsk- zIhT3m&B|lK5)+DU+~HQqn5p`8 zW1h*UF=OQxQ4NYkSpPa9R}?It=~KXaPLFP>OD+8X&4~t~>1FyNF_!P#uYcRdDwjL) z>KJ!#w6dIYIkC#vSXI3&Nj#|ZPhXvOp} z&0(%IIJ8P9;^AA~!`r4i2|MPoa86j~uS7ZayYO+dL#9Ak`ZkuUb>xf``%#-AZ$A9X zSiII>Q)KC88O}( z(DwGQ(lfl9PFc&?R3hY?40;XunbciwG4eY9_}#A~1xo)E9@aXNPmtlm6kaWJ^GAMk zBSVYSae%KU1utS-!qMG zD6klNC$2GiRMBK^$tUxKF7J%jq_y4Gemo)z`^5{xqNp69--oMapYshu z_thEpFAaIM?(;p>x%+~w_%s?YB@CIRE&S_8^u2z+-)r4_iWu?4+{esCK4S+peh9qFnMA4KLxPSCX&e6 zu|c>UDIf?H@17XQEYOA4DRZTv1d-}Oj_5QtAmF)#1YNDVUjUA8#Celf8}nW+bgUf; zK)}NYvx#sm%Qb^7Zk09BYvG~T2tTK@TT5mAZR*VDFK_t1~jcM zpSgFZIXFR3Ti(3+$t-U(45W+QIx`mG)p7{Uu66ypc7;aa4cUgz3y(&fpq1)`gwH~J zW#`aBZi?!=CiPZX9Y-^42t&DlDe(J{inB2y>bVlgw#nMq4Ig>g76RxnM?4{@R4ykm z<;chCoTKoycY@i`nH_RvV)Wb!cvqPeKMFaW?mQ6=Kv4<_{nEcYE>3N8uK6;W{K?0K zSg}{CoB@_QoDVd3(r7Q@1sdipG3M=M;SK49rMeJ!-QXZ6q4EU`R9+l#_&<+^MMb5a z+5acI^6%h0^tM6etbAP00N|mt54goNcp>57Q2BwyGC;(F7(kzapbYv5S^1yy#T-0s zK#iBegwJcn2vi??bbx|)bV)9F7Ay!xm80RDt&QHe_UwNMRPJHzQuQ8BVJ`E4ih_$5 zeqrHt>ut&ht*l`U&oA<+cd>mF+d;p4~;S!}6Arj;crd#Nljt5=JWc z_lS%DSlt8+L$BN!y)hqI=yG~rZo8arP8aW}|P8 
z4vkqqH$0Vt`=G(o&~C;J!&qTzDD0Lc3{8S&fj?|4u4SkNO?C|^K`k;;DZn@#v+w*1gUF6zh_D0>Ez*ZdoPpjtr3!s~%!10TfB3gC$_?$#` zoy7$}+d!vt(Mr*s`OfW3*C)(56(XEyO;aRa!uRe;Uo1X-OjfcdVrb&xU+9?4Mkzcj z%=XEVlO>PGBs*eZg|nUtW>c{EwI4O1T>TE^JDK6v`qAN>T$cS8o6Gp>3x6T2A}P~9 zYt6V+fpoQ6-s5UYaE<=sr7u`o(QeXfBsngvyZ?(~l^?&7gspqL*B-izxYTuwhsxu+ zx-)*CDc2Zq-PtlmmxXjdxi&61s~?qq3LoVj`!x%9fKjf*Im~UZDYY4o>7q{E7_Z&q zH)|v?giwisi$zLAtQO}x@q-yQ_aOdK*KP#GlZZ+ihoxjYpw26~*vFfz`)YsT1LvM2DOgLT6wbUoJ1)HMRE5NR!ova^KB)&hF|+ zIHEF0I5kUtmLrOK0Ln8hZpLq(+xx*_(D6Fi0lC1&&mk6;7TnJkh0+iMHxLE-bQepy zmjgR`tqv#Ed44{O*bj^D(xUyS#j$lG3)F5kI_o3{hgC!CbsHFzjiYDpu2b3zVNE+i zeF8bgCxa<6Veb|WS@T{GrahN*tM1omn$=EB9~F0!>sn<5=RzbD(La9Gj+#t*S2!ul zpqqt-bC|pD{oBnZCzEzRUNMltX`5CDc+1sb(6L+NDG9JNz^a)oW!}zE9k1WNn_QRQ z_Y^>&#E+8#QVHG1kIt(~{jxgsQGZl00EO=No!1kykOXUuO zolb7i8tUaW@9Rd-LcHZe#IP7BlxvpyHQ)<{eP#Y2U8WBr5QeXgw>Omsf*n;0W1a@I z5SY)DW`su%N0UA_33Q5-0Gk2_QR*V(x1`A>+_`*|_I1tgbi#0OO14SY9TL)e-+X%^ zvd^4pLsBaNN3rYf?Wbgl2@vasF=RAHLXm=VQ52p%F0If59EhT*;GErI+;b20| zYBa4FXYLZE6q@GZ>V`8wgp7rZiHpsb?WN3IqzrK$V2$+G8B;@uUe}6N<+qivV_I8PR7+%xnY-T6I*HcEC zX-R{KSL#o5imsQthZ9(Ax!gWn!eUB61@?mmhk}z$!yt1_sXQ5N*UT{+HgG4F^t|TK3h$ph9 zaL0}~n50BzEZUhorH44kbA$7`v07DSPz0goJnyWG^PAR)jT@^+x-iP!7)N&D%A4tM zWdYa}X4&nmgqyc*Qj{GwGl(!zZYn4D(6OKfUgp!btI-i}%apoQcBIj8Quw8;^8!Y7 ziQYDotm7e4BeME<>2Y+J&5BXM{i2~CabtrSAJos^Q7d=V?bMg^bTJ=MaGm#QIk)2? zJK237B~i=8bXmFxif1NJn_9lGvS2YU0UHcLo9x2^md5YEkN_6RAS)Yp$k1hr2?nXsw zK5jA3q{T&;nqjXZrI9FZ2O5$#>!)>Ww6C^1)X{C34{*z37Z{3XG(zsQy*l#R}O{mUFAaYR6w^nfcy7>$jKk-6AWBR zoY{B&xzj;(LCK^Z_U}##wH=A3j<=fKVj~B;UlYIWhr=0()OR+EwZQ}aDuZPJA`HoQ z&t00V@15_P4?0lhErWr3T}VTSHa;6(vzR%&*@QJeh~Q*Q7yZeK8$DUXE^RL1#|Z=C)4(fY633q;x__ezM%pLzHnk#R6>Jo! zey%Fr+xa@bx`-z6k{C|TSn>IK+30)slH_GoE;wZxx48t0p;go=i?M6U@J%BCfMyKq z=bX$frs4gr?`B1>a2UB9Q4-ggbXD29{F{2CYZ&)k&YHz&$=8ux;89yjbtyGp<@|*I zcD5x|6zgVcJ~$fpnc9u#Ke0KTEBJzy>y_1=7AZxKL#On$3rv9%>39rQ6#mB*ofpDzEY0lw_+VRK(slvY#$i3;KKF&4`NW^upJ?cW>LmfMbS`RO3RM zgkflDaOvQrjPJO)Qc(mJi0(-3g2m$zwlD8pST;EWRU$rRTY;X8yL-X<=}Y&6I@^zL z>e^<_(4SGU-HQp@2R08MA?n*coWejGNPTXp!|vkX~RpSIwC0=oTTE^BGNHSqzL z2{=nvhR5iD^${dpFUmqrN{aD;G1@C3cc2(7jwF~-%cNpJ#g=|EM=3oYJ zS88gG_{jLQs3*pJ57AT)VrPm?oXZ<7$N#Fc=?>f=>8(B(6h`8OISsONZ|}N!BBUfd zcnlP)v-_7o-BY-IK&;C-4}fDyzPSHKZ>)2Sbz;uJTP+U{$Vz5n>3}p^#oo^S&+Qr{ zEW)iEjgmrBT#rj%a1P$XGM_PK$Ej{mS0(V`w3+qV1|$}kXiNao2tHY8bvBLR*U zM}@kaHFE#q(ad@VFqm6+=Ukac#4W<3!I)fUZJY13%#UN6?1Qb8p~?amQp8+27Ouq+ z7C(V!wJB7|@6wyj+9Q#c+C3bc%Zce;A|3yDe{c9&N_uOP+s7c^_z3Tht}sppj3(>H zy@ZZkN^^gl>y&lKA|gOzps^bZigXkzvRL%bdtgLx-9wYLHFI4q|4?PTq2OM8v6? 
z_Vva(Rb#TJIg5W=MnG5|GJI-SlIbl)JQt$c57jiA_RsBWeqd%4U>_RIfK$vEk&K_K z?kA&RS-DO-Rd^4+d;(9x)nA{^`DDM%@mf*q`ToJT!+ziiyg8 zm^Pp@10mx1D7J&3i=UWT6SlLt)4r!uA4^O4$_E7zH_iy!`HcL9eLYIxmTn7(axXGjdseub-i)QqiVoE_`JAAu;>EW`tGFA9Q6F+?&je_ zxMl`n12f(}=gm&P!n8|h*{(r^%i0jZu6R+MR$}_@?ek1W`(ytD%rG2IPIOKsHgBZe zm$Vx7Xh$x}1^tJ4kudEzUX7cPsV7H5l6>!||uvd6-h7NNnv_jMF(JI2~As z&K*Zbf@p_@5EEgNrQ8Ly9i&cS6{HDsHZ@go-KRap^XzpZ+6!iP}2!Dg>Lv2&=mI zr`3YHp}~$P9Tkr^on2PC^I%NzEs=-%>p6o-?1L=uAGxf;7)BDc58N49@z7yvH(~9T zT@h*5b}Jf;W_w)PeH|uYw-YX4%p7K!mQ~5&MXP@jZeN+NoId#X-?_?rkg2;nMBFN< zjd|LT)l2{E5y3t6!J<%Q8UT6EYA4ReMpB)=ZQw`GZ56Nb_^f(RRLaiT^iAaq4N6Tt1j4+CMVYHPCvv6b!GQQL!LLI9W;5Te4B|0Mafk>qjL zO;Q_sD3HDcSQs8ti8F7+dkd~&uynGiL-$6nmqnnqxjC^1kSA9(kgDoOTILhaINi-o z<(vaUZ02cG(Am;@;&|#Y% z8OzZ?$J3SY$;E7I%wlr!sr7*&hD4m9cbS4*Y9HB>?kYZfClE_+_=pJM#aVOaY-x<& zMl`*HtiZf$GQRc*{1A1#Q9C3X0`yI|HJbjR0rNI+79pl1kbmqkDyPz0!)ZD%5%YJ>DDM{M}!EL z9D3Ct2mM3ZTp9DUKAUWceFXFURNgu)Z0m49UFjNH7V}rXX2;%vCLNqwcrM6F?rpVU zx*vFnIJHl8(5-;d>mubc!q+a9{$5ML-3ag25eP&mt!MAb>IwU`Em-@(7~mEs=9jVA zOZVD_UQ#~-0jrk2d!eJDh->3C`A-<`eX$eND2|2@fta`b@QA9L`i-ZO!Kz(dU10xe zuGP5g*gT1h_)G@1HczUMzfS+ND?RG9M|r9oPsp`QV|HzA)T@sBu}PiADaW2u-B!+R zldC8&A)aG|t3*$W;95m5_I2g7wmyeq<_7Gg)fTmqtx7pgdu z;yZ}y`j_VZ-eCmxw3v?|06PKCYma(a*So*H$~RA%7!D67sR1(Dmr$aZIa>6;AMOx? z{=p8a6Q$w%4L;#y2-xtrWnXt$_6b~=MM#}8x}U`&^BEiW!tZ>j#kg5C>Gf9dnV!X=zWJ$s9Y`N!(S9hJ`|z^Soyw0NoN$Q#*Ij8e*W)%W zc1FoE?Vwy8;Ony!fH|8d)gNxindY6T716z_@B-T>Zal`(g{J7;qtmyD-z(4q9m1`3 zVchRCJD5>(2Ga(zFg*xtO@FGAL|d#%7J9-%gf)F2d}xoDtM^3PU-8h1I8pTe$0{S0 zNqx6+Yu@1P8yQS04hS9l1`mO&;PHZPFg-EuYk2HtVx9_c7>N%_Kx$fQJ#MkO!GUqg zB22K;3S&v79OP0qqet9jXrGQ-wvMQZQP{k7X`j*FgK4kIMvEkf>MFQS=vXq$Aa?!zlH z1BiPrJx#vAGcCOB+}4B(36nzbH#Y1e_RR@~l5%?ws?AgcW6VUXy9^K?(6ancJVLm+ zRHL!UPYhS;==bZkeWrC)`qP=^r*v!YZFo&Q?Cy^p+rFze$r!6eL?W=x*H+xB^1-79PgX*Zp? 
z@pb%3z1tM7ggpt0?*9~9eSVDgsmB1@seS&eJKSpzRgjm1arA?#uzqJ z9?DQL|M&a&%5C?XdUBumgk$^~RCr=o>ZTlL{|y#;_YeTInNOs?<0Q#NEFOf@uZ!mF(s!=Q1|2QYw()T{k_IwEvk(PD0b=98h*J}ztC$zcpe7z0lI`rF7rE*&OP$Fi(S zPt4mND7@ex2Dq1_Nd-IZX_$FGxIsZ?-GFpUFMKgC?Q20eYmQxr*U4%lc7;#?{1PMW z-$Um|A~OsTjhML4sI=VFVb4zO^r%-~OzYhL?h1!j5xVqKKU-lBm zqjNYC*ofe)FXRlS7%i3|+8IhXrO1^A-)RWaNTv&HK7$8WD7@!NywAJo!=>I^!L~h| z(441vt$M~7UhsENB0ri1E;Hapy-L*VrqmW@b+e7KnvjmKA1 z-&&x(1(akdj)-m_Qf8N3%N06)j_Oo1Nh5f)GRo{fi3Nd>b{hPsdsgaN|!HQSz16UQ+#t1f0u_H?LRleYs1 zf&W`vk8#27eoiwLidYn?0N5%$)6)D#{@M*pw!a_^lIosHC-w`_Y1|tYGh^LqcawH* zNmZOzxJkW;4nsXYnQ6T38HI%d#29O^c`^1(Ib))t#f^PU>?cvRw zc8Z5*Lcn8hORm#;Jt1zGh9Lto(6Kpx(|9BAD&*6V5zRG<2 zR9Q!Zp3Cl1=97uP&a|)#e1o2nShf4aWMscSeo+elwYX(;7(02lNxuG$_x!+@7$E&Z zycQ?29l7<>KPg{5!1pNT^g0I0zI=7blaXdZZkCld-JSEEfcKkuVzV%UW$wg@@&y{H zMh?R#7ls!+_EQZj!>_%3QzW6V(#2K;Wgxes%!;P5=e8fF_%{u?D59Bf1Q-v-UGRe_7j*`3o`l#^3bJDANGBiv{78&=v10{R{ zL;bM(Q+uwuH^7YrKpeFc6yYhT)ioYdcK5);$06Q4ddG7V1?OqLe%qRD#8QP^XHm#i zC~6SpfyLNk51E9|F`0oVHTvAFKB zX|0rms0o1$3A5h%Iq}W0TJE`0s2bb88_M9U+pKyH#~8nP#TH!CyuXdHQiE1`Bn{(Q z!T+>|%AN?r-O5E1;)5AXPn*tVZ z$$fe-D%@*#-~WKGVffF)&r+my8-q@{-HW7)sKx`Rx*bLEN&tJ(b>kX_WjUvv?H=wL zzZs2*!6R(0*<{#AKXO8SW&U;FEkyXkhO{-&kXZ#wM@tM!`*Ghi+SIb|&H9#1PX2aw zcyx^7`qN&d%CxImA|em%3+WI*n6knxt=Y7(8%jdRu;f)(9^XAVb~jf)k`YEPZw3I0 zOLE8Dg8!TLT}EH}728~5L)4T2ef?A>?@#_n=$D(|lj* zxVG0okw~?bUK3)EC7ViF#?P%+*2Iyp?T!}@2ZrG;L=j<5L6 zyVYzd2?m7)FQn2YX>zvtVIZTyGWfup$Ok}GwYS7h^Mk}FrAeek=bReKXYvY5#`E;y zCzHjeXGg&O3>G)O9%SSZqY#f46fK^lgS*)HYQAItrK3QdnHkab5xCS3y_CMe6 zE>RG(gRk?@z8hsQ(5fMrY<_gQ;K!=b+=LE#IvH}*PbdXVsw z2R3y?7iL7@rwKgO%TnAXZ;pIyU>qzA&$aiR2{iO3phb~dvmm&`bdPAu-394To)D*U zZwLdP-@@$qENk+fIva`n#RQ>_{Lbt);OLn-E>B5uVy&V2K?T3|&a3Nj&;cdGP+34s zK+_06h8Mi6&)?WndqAoSY3B|GBA0fy2*#|6rZ!TK8hHX2%EWEdt$?VTXU@f$jHB{K zY7o$+kw3A~pQAlayj4duJk^j>2y7G#p95QdX@_HevtN)$W*$9d!86O!13oKK%$X&f zA8Os@ogN%eDZ5~;oH|nax3cR^-PXbEbAPkTAnE8*vLdoi(e`1$-rMxFdk_9HZu@Q# zZVjQ+j@?6I(pWAIc>cu1DDs@sh+#a_-MyND$nc}f!j>`m++P94vOCX@@XW5VvUyT} zrS_vxTK@{xmJdDbkL=jL@0;2&evCnHkTi_#sfdm20E$aIFc<#~4f-N}cmVG9#}k;o z;|US9%?b3`r1f3ZN}nnZwjF?W=q>Y5-xN_oal3kG=!iR$-7I*9gag|I=+*V5MQ1`N zJIKG#SyJ9ST)889X^Cs|!q=_OH=lF8MzWUXCk$YB_G1>7xa-H?W83xPriLWi$T-3rlepk( zvP6ao7;fPziZHF|+RoAj;Q^QZ3DbRY93f-^l2cO|<<62=!MRnSb7`*0uI{V+R>M<= zuc?2C@Qq`E9l{)hF!D*5hZFlCyLL?JDAueStpJh!Huv^w?dUdRo4~1nAnAf#g~8@% zf*1*CC9ZDz-sUx$q?i2t^g$SUJElX>7d%I`I*6BQCuBL=e?dkqwJK* z4Gp>D;(!LLD|@kKuY2AZ!6c_*J8vsy)g%~s>|qncn;#PbVS<0mmrJ#05gIH5fo>}H zy=$2!e(7KTIfb%^9Y(SU?00se_S3q2N_w&5l{QsCNe0bf{?eYT*}U=PO-Wuv$G!P9 z9TQLqdeWciY5Ci1$*nescIXh6jRAN1_6x*ST2G4`0G)Oyhh`7ZJ*lPD=&XYJF8ki; zw3d8?wm0d=Lf36(QqD zy&cUeGpT(!a>ar051=a*b1#z2rG+rO%)%7`>bk~(JaGNEW;kz ztV`8bob2d3;%;2-2afmmT*9vD-*!2pGS*j~OE=UvB3d(I=kUXxN&dMq`4=GnkmDtY zU5hDNDLU>Ca=r5%RwAte!%8pM*p+OHLM^v+4fGhCYz$|)i5iE|A#-+BM}R%Oyg1#P zjCO)Ro5f>NQH^cwrhV z6Rv71nFhwW$@q_@psq+kgrZe&i?$L^Rnd5DpRHT1$fRW!ekdu-uHZ1MGn~w*YNIlK zQWvsXUJe$=J$kNcc7@&$HUJ#dO2Ffx-!?whSqxhD>C6C^N7=+a1uEk|3hKJ<;h~Vh zo;Fe+*f;o;VyPL|Xw>WJ@Y)4GeDaIIr^82Q94{8Nj0g`5P8GSqgEuLCYi;-`8BX|dS#+J?b zi@_$Kl@dM!zt0}(fv6c|x)#RKJOO=}Fj2)U=%zd^^C||w4`UfDulXZ|L-N;ERMINI zUZ|0)I7cvL3&%>X>2V7;1Kqm$rjl>g(-C0M@=J@lf$Qp-S9RjZ|gXR#C5rc*HquG~B#?WR}> zMBA&XP@>AX9>}tjU~vX{0U&NM*L*sFitiPi(MfPSVH7I1q8>nq>opA)T<5^y7x-iN zGAOiOA`8C-g5#Dxc|_O$&OFGrkr$jUvU{XN-#MIlL@#Vsd*9mt?ANdY{j_m>Zvfk-8u01Zm-q*k^4CE9(4AE! 
z(jG)w8VzzmQDo;Az%yEEy=K(&-YZzbBgQpMP~D1qADT7*TxYQr%up6ClOYE=i`Q#Yk6^xIqVK?NH)q(zOlP9Li*t2ch*D(buQTuVkyHo(M7o{z-yM^05TNFocyRpqqCysJ=z zG>EQ#v(100p<5u4%_w6@na@-UwMr|4b{Nz;j}%F4heSSllFPt7E0edgenNIR`F zW)@!iM&yjjL)=&zC^(oPViS*{^5p>F`#A8;azp&YeJqov5H849>Wc59j>MsoW5&*g z?kV}wuA7yjCu*98h6`y9FcqAa=bOu?HYn7o;G}qvH*7j`zU=@aV4i@sN zL#CG&au)nVDiGt3N~QtdS<$I<8}oN#SH_9wtyxICj5e zIJ0|pe|Tdz0VEZC;eqXaYBbNp5Um@(3>$SaCT?{B__w5f{k+`iXp`{ky=|kki5~L zXjejrzv1NUFC8kYZ7L#@y&QU-F?9o`NID?FrpTZ`E23h<8Q=K>#>njv0`wfyQpCxr z^gW{Tw*D3}j6L9>)$`01pM-udEuxX7wpuvZ;Th&Q(v- z=!u<-k0hC>iMm*aUqMht$+E!pRT;_H#dj>T zE-n2&LlE1BpV31?=eq=achSZ#11EiP2r$zfa@i6Uk5u2O{{jInt(0e3{(0PYWk+-4 zuL~}-((-gO7C87wio{N=_H+*H`;jO2Ft0uJU#$+ibcaWoGEHR%R_K9H&hMm&hJHj> ze^mG3%|^&48Lg7U4GW1^vk;eT2?L3}&)L+%{^f-~QiTD#aWw)f2cMB@)JpP=>Sd?& zfAm}s9EGdsjg192;ho%)0Rb^Uzx;>(OzA*I_g}j%&B{!40w=E-c?vxVm|yGa6u{%Y zdZY)=^zQ|geEah1*4n`6xz`$ROeA>fB~iqWT^hhZDGOnQrBmb*_@+R4*a zqo6e6Rx0?y`;b2tQw9a6Q>muB3UIOS4GVDhxppO>^P^Jj|8aD#flRmmAOFtA(3;cK zmeJUlQ{9GY!<@|~IfYcJIpk0(qa2GlBs4P{l6y`$D?$<-?kE`&LQ$#ImP0~0RNV6W zfA9Yj4?OS$*RJdHdB0z;iNf&NVYUwS0A|q!TS&5g!p)5a_fe)n{7m4uo;g^s0%g{w z;7hAe4jdtZv^_9sczpCas7nQrdX$RZQfYB@J?nltPLM1kOQcfJ4LuiR+ST1{A1q)E zayY^N#3guz#}&LjlrcC!2uNjaiRePoB|D!}HuQxH0S|luN7->SG&v&S?uB$^sSZvs z8XJiHAAnzfwdIxE*}8t`RH0PNF21AXo1K*x?0?klQv2o>Ii-j`gUe|Xs&>;Xuj7~M zW}J~|j1ii@Pf5^85!Fmi-J-MeqXP z12UZ!enC~P{duFTimT(X=CWOn|C47yxOx(?h4!t-x5m=#Q#ybUGB2#|aSx2C_iVZ} z93CKY7fzFZX;s?4u{hg?md@1dbx$^OJRYBI=Q$j&vQ2zp;w~YF;{WxIMUu-F5>CpP zF8Ab8j4*A;M~yO^@u6>Kog_GL6};W%R%;8Nr046frqSvA;Gy+dHONL)*fX*8>A(Fxdt99M3Uk!);hU)lIwvDc zI#=i2n=+#^b_C;gdss_drd~1&*qKKX({=bV%SX?u9v+n;@mM^Q0Xh*b^r&m}3c|_w z-vU5fFr1h(n&A2RzVXQm_0J8!3rke5&IAaKWpdkRbCP=IUf}YMJ@3IYxjueruISB2 zfAsEYLzdMnnQrJbVicT08GQU{e`CSgiCn#tCxMi1;~40VXWsWTSBJ7N>hwG(h!7hL zTE?0v(|6^JE9>ShT5Y6R4`2bX1j@#2Q}6bY9g-K=bTqUV{N=|-t{)7z%Jf4Eg2OnI z5el&HaS6GL5xe?YCZeYGy%3i>;6%*3iSHC>W|-de9fO)(Xao|nC@6ma?@DFn*m=!| zUCJbF=j%8`JS+PM&-{pARmjWqw~H!@6&HGdm2S9Y%ZYM5+WLpZdmiXuyDf??(b!Ls zz3SkgsyMlVU{&DqDW^RbFafb~0}%;%Bs=t_+l^<}j?sxk7(#=z;NWq%*nhlN!t;R? 
zI}*o)o96|gglB}ir(Hb)?PIh1dgT1KQ#vLkARQwxYSEsTYNucHb0atzT`Wl{1}RFR z&N*qeZM2?VK%z7NZ^ulo?-0_V1+E1%WX(}#N!WSu>mosv?SZ$8L#Ha5ZZzRYXsCsh ziw%+bdv4w}OY%WUyxaPT6!KN)S7607IsEl6MTS%zo#w{jAb+bbNgp|!QW1HyTn`Ep zTxOD2zh2V2nZh_Sc)W7oy{*RiXCCnUGJ2-b>zAmr!}{svp^Xr-KRYm>v=;;8h6W0r zN5}5<8{IqH2Ca&4wdovuowq|Z=P=E;masqo1%~966;w~?Up8ls>)exA`HOfty-~uoRFwitlIC1;z=q^a> zu$^R=kymP$dxWi!OBzprFV`ps*#mV=0NW{r%@S(7uVZK{-(DE&Wg5w9a^X~25gxx< zm1nG~^yI_g-U$XCocC$ymF+)v@PV?ugG^W&PlFQya^^M?HEsERN|W2%MB2@A_?<5L z#?7j{TVef>!FCI_YZ$2DzFw%kbc3&@z0<^87cAPq&yqJn&bd2;MmM4C43nefRkyIw zkzDhoOKml;O&t)%oDQHOg_|nm7Qv@o-w_(~tpmkmXde9W2|rP#t%u{=&(4reDDM&j zp3?0o-hcF$T4+TpwrBAC*P1OVIcYuc^`SQTXz7nF`-sJVtk*Yl-&i`jag0>z{s&~N zbc$XV@9hl(yv}^w9oY*NWDjO{Uk+!S)!!;Sg9px0KBXzl4hu;z!*5-jJWh&G(Gd@R z8eb+oxs1)TjZmR%Fg8N_dvY}Oza?c5G%j__502g4$8z}UOH1!?V(;a<{il<%axb<_ zYcVWWQ@y^I-LXIj#45UeVp~=)_sOUWI9g{i&6e; z#LGfwh?Chth{ABLTii=Jv@&9SK>2eJ)&B20TsY>%1?uUd*J7<$(#u?BYK&;*X4;h3 zwfhlc=%>m?W*YI~R!^62y{kG_9TT82Nm`&KK3%H6dRZF4373;Q8hNcFw?>Pl$~xUYU3FL<11lx{@QtY zKRUecxq{LVSQs`fEi&}YaUNM4#!2Z+H zayR3qTMx`6AGt-Zce0tY3QrlU8NBFeUjE^k+LXL)Fo#Pf{zDR>jJOAlZx}nJw|kH& z0G|Ywr`G+8)|CaP=Bgya=%KZs_g>+;EkVZZhPK*IrQZz+ z(k3Muq`^J5dqVu4>Mg5g@>OX=pi(k+#@M7|s(QQeb1bjrS-Dz^%= z*8&_Pn~FUOdK?2rumGfE?rn8U1#<9r{U@tB(VhFGtI=B-V8G8E z7u`-M`gbUa5!0#V0O<+ZlBba}_PQ`?;=r-Pe}P6~f_>>tz7Yf#$fuB@jq5```MRAu zytQcz2@`2E_wlWe-%{zVD(i#DpGFUuY+lj}l8<8;xw?u(84 z&LD44>&72EiQTXK%@D1N6^6%0gI?2yRGIOw2)}#k8xT$;n=-qe$yyHFij%J^=!7K= zf@iFilgg{BUaG;h7E81-*d4vW?~3$}Zpg-^2g_arwb8!4TkDrN-;FPsWrzK&6{n)4 z*}EecJ+mEf%GX%&oHG;FjB{txz$A#qsjBlEp#I1^{&g} zG!IW8ipNmz#4?%dKWn;DfrbG*Y#vx$pefj_)B;L!Z~brei=Hb|0A{#IJ#qa>>Dz|s zcv3Hc84)=eOa|vy!v`MiFLdgaJ!~iA*G&fm%L)zuEeb6^MsmnB0y1{Gn7t~RduH2| z%TesW=A?{HV_7bj3)<7~U1Phr&|)OPvo1fQ18;_9i2H0*yWa#LE9x3GuVI@F`1G3j!S@eKHgo|rp-;ED7b_R$XVHzxDY$sRmghSN6 z)Bku?e=z0Ob4sj9{6vi>LfhO4^fq!2K_Id=M?UV$%i-_6R3TN40S!6>$eSefliUk8YFAn5xoaeC3Ke zllda(J>Wcyc@U#|v9#Sdtpmo4B8x7v)(X{!M_HkdwtOoHack6}c1DnKY{o2Sn!NJ) z?ip*P8UT_b$UdG}EOY55iT z{VVRzz-=1VXyrP!3hh#xpZ4m!Y`wOnDig@=Fc`BJ$IjH#Q5@Hxr) zDJtH>4TKqmnC1q7?;Hn*SOnOpw@9_1#W*avBsLUpa20Nn~xdEf#Rm0Mm3?D9XI zdGQs~B)3Cjt-kL0YaaS@I*>GCXQniul1z zfeKaz45B6xWSpYS^hoxPXMV>a;HKX^z)BB2c;Z}S;jE+J@L}Knu9u82ire;TzY@8$ zYi#_gYbJTe%1Wo9nP5^9k}@EZSah^NmYyMyuinBDB3*r_YpsgB+T7E}feQ7GoX$n= zKTk8?_%2rs-#;;|rrp)aKcA^%sG8BC>665Rd?o)0Jxk=;Hq#Ct^90W?bcZ^Xb-<|m zU+&O90oyuYY*%0GugPHxT&wZRvYS%RcwIBPT^cQ_Q&uJi4x_t!J^Q-^YWfp>v^ ze4l;b$bOn^Im-D32Cyb}PsijtK(6#Zl98Pu*934$`G+l9HEpyR@40abc4Ob?yw7C6 z@iFdd2(&1Jrwq{GYg2{RlG_R3Np=iwtHrau9`kbGRjlmUjU`~lTKwTw(-NEe{_*R* z5FSE_`+LUKYV-b}@tV#d4)BIRdwQ0FO~45+QEoPMjldI4ac^ze;U6BZN608NRn~VN z8B-$ApL~<@NsG88!l(2A60UG(_jTEpvxCzN+yJ1O?Ec+5l6hXSP6AC5%4`|R^PQsSCUtMKJf)C5z2Q!54 z=#k0$%<>=^Zy6S19Jg5bxhli2K+qxuXkUYdLq~|1>6NgoJ-Y%RtH~ zotpX{piKj1V4S3nk6E*xMQ?Uh)~vMh-Wz<|rScaHD-3S?Y7}$R&2Z~k4@c&jIf<>% zddYQX-oE}Erg3jetzp?RXN4k~)Hj~miF^2$=46dL1S?VLA$PmPRyRv8n)3hc8R<{9 zG2S~G)cHIB#_Cnqd#T>9c%um_&F&=@pW36Qo^mpeJDyAHAM-`Zx#Dk6 zt{UttoFEZ%SBRGwxks8;w$UHQ*%@i3p=u-?M2dh9DqH707*dZ4bCtF305Bov7fx9G zv9IvvRDaIX%hIyD)dYeeF{J}OGMr(bpN${X4Dq?_QR8BgY{n|iZOBi{7d*IRui~I& zuk=8X5w;g1t`%2PDzk2U;P|9r2-1^EV3Mo7_6uEB#V9C;#$cKx8yQ)5ldbZ+e6~N= z%+O5CqCwzQR997+$ALYspK@yT)6dEty>uUt7u;D>|tuug3D57ff&Xuja=JLHqt=ga!Z zuI>}T(4E(&-6+Ns@1TI5t@FZ_+Pzxej8&u2hG1*t(qno5f1YQUfZGk2P|enTuKFfFj@z{{XL{!as+$c^X0iVQoaIgjGzM%cMIcWhmay=7B}ofB|GY z;{1wfWs~RoFSTVm%m*+G4d(N$Psunzi8GJ1U~p=p_M|r5LNNFq7P$(VuE*1&2&9g_ z@0K$J$lCI6@A-d@7+cGCZH!aU#M=^s!m!ovnbTgCh|rfi-9V9AUc`;NkTaS9uorsGwLR2bdh3`dtr-w=S*yW!{?98eRXjgp@Xxgij`L2@#kxh;yJ zx{wGx8+6s@T%@j4fUuC{6jx~OM%s#U|5&&ebk&2Y?f;lWx~A^WKpiB)p_kQEhj-Jpu= 
zH2GZwV_%Ao-n@IcCi_D&9cUz!giW$mcjldQI*d)W!N0pbwsLF#>fys+demT$5aGIH zRdk`@is0}u(;(l}h!df1;tkj&`7DrpN7Oo_DV%TbweIvhmgX>T{RB*~jUWxAf_xLE_R>{VC3_ARAay zqfcOvZ!!bW09n&!5@b>K>s$1f-8aqX&ed5(bbk{*i%kX;0=)Kq8QwF1G#)2nh3BV1 z0i`!yhLH92vrBtq)-5CrNgi}BiQ~U|AVo20>ERnjv?>%>6O_idly8^5zI8CzE8qHA zEq1HO7y3K$_l~5>Pa(<$Y3=v~X5rE+}DPas*C^kw*QlBileW@na@JI*_G zYv;%CC^F@1%&*S{n@4I-25%9b%yx+Zm#U_(>!u=_qg0O}>?mZaBmCV?HcI8X9U`A% z296#|y+CDwo;pThGxvJ_V-e zXt(L=x8Ias`EWmMc1Q9X-F8aPQ^b$2Ry!`dq?gpJH3Eomcw+5r@u<$bXoo(1(gE{- z>fR}}jgM>Q^mc9?kck;^#aHf$)a%=J5(>lN6cv~t?@UZjI!?&*Y0Ay>u<4FW;m53Q z=GN&@mXFhIC90zL7)%FoR=6YdeMqow%ePnQ69q#-Hp}D-1TZ&<_&K+h332cOCwoN? z#e`s-xRvzVDIE++I;!z_YLbBy6pwpqYX@cKyD zKHEEs>FrxmmL$QqkC!-)s9x)ZDGzV!-8EpPGVaN3J5#CTXaRX5JP)rrWu+J3N&hTq zqV$?)R}v>w`5_lsA`}A0K7;YEWAVe0a$>w7`_%b>{sYfa&V-4f*>-|Cf%~V5ptWve zOIteZe}L2VZ91M?T9id-3qGl2^WNxCetE>1*k4~AF@YT8YG59=Jb2I>=f9<{G$_KF z@M}(ZesBQ<~5S^e=;*@q62>09#W&VXzq|c3mO}wvt(kDHsz9?&mM1m-B zUfIwOTxqH&zn{2M{m*!6L&sK5e^+<}1Hk?yB-|Z3EaSk{l47@?gtHk|ufdq5jMHaT zK$@~<4a=E-d*2-WbZ|!_?2eS2mFryQm%m%9@1-7UHtRw=Xk@}&Px*QjGoEQjXbcY> z*Zs`r3e%Tu!P)=Wrk2O6w>t(x0~M6D+!+ab6R=ki|Q-uwIm$Znn+xp zJ*HE?eeonmSvG>E@Q>q+>$LBmaru4*c56AFUtKaZb^pdf_U?ygbWS>_gmFvr`zDhH zAd*B&{aZJ$>*yz5B&ivR4LF==3DmpN{CbqpZVoCr++`v4k1X}lDMb(ZTZXKP@uy}Z zntAY$;?}dR7VQfQo}3q^Cou#4VDXyVyJ0?><4Y|yyb>dbKzvst4^kb^H5@=2IaOW* zK$nR|7tdTNd!V8~FFij3-2|b3I)s52+eqX`Zcv6p?~OO0?AP=$(a_73cY+RBq-dqo z>0;H@#ckSjLNoxR+q$O{R4vltN8GxkWbomH=E>F%1yq*jOM`giu?D#u^+0%td77-~ zu~qsCs$jVsxChIFbX$wZBL^<6d))H4qKjH2Mr>3_899C#oTD%FJWDn)294?efH$tX zqo;y?4fzMELh|>((kPODq3`ZF#n3LO{e4QxNx+A*Mj9d_S8FS?ij96dyp3WerHKe) zJ}YOYu>fo{1bWE zFZdwJ!m4_y@?6K6w;vK;m$hVIY=u3L9zk*>n~F3}G&}pG)Ichbf*GLWlsN0>|D3YV zJhAI&_+bCJFNQ7c?$GTwMqmh;u2uvd8)hQjF)Az>e^)h8R-!4q}NQv0+F-&dYFJP354Z$&s+wp~A`2Kg`h* zfo?j6sXI-!wxx_|{Gm+$s8)4QYWM<5Rdk;s1e}uRnI2B|eaI;oLTT(Vbg7Ddr9ZXn zRzPH0WH%rYfR7LWI-EAVF|XR-qmM()LOli(xci7@rPRvMlH=zgVzL(~Vr zv7}Tlo(*(%o}}~)!rP`dPDLU6EN;+YOXXnMAcs3$eNXk!5JP8Gi=@?8nbT||FT2={ zOf+k3R&|IZO|~LGUW&qJX5ZUrY~{t8zCLtoz0E{e!pFgWjd&}k^Z!; z*H1W^G*-I~-Tx0`N*_{|QHD=(VvFSw^3I}NXZM}nCXs$?wbyvDMU~RY zjKNUJ;*nWr#moG?(dT*4e0d`grE^uI#Kt-6%{bh!3)&Jz?S?!xp1jjoCT+4f^zx6?cHF?0C^*h0n-dUmDhz0=L za8c8LFe%GDGA1UMy$DipiaIj#cbwM|$*ESYchhVR9nY8h3O}B*vFvisM!U=KSn6s1 z8$1SHL;PMVj$*uT_GKo+34y`j0&{t7q&zC#Rh5mPj>UHRYZ8n|2 zBoCx226*Uk^`!J1{5X)D2XJA@iOol54vtFSq$7+Ewp)!o^6)kZedJWxX8>W>^C4ly zk6hQ*M;DvX0qz*&L*4}61O)wYmOm8!s6cRzMr|i(q)x+_8WV2AH?xNK-*^5JuqA1h zi?p6~`nlt1!;LEjS|nW?Tober4FzRfk1=2Zz1cI`@_QS~r#0PyHgd`da)=)M^*f6*kyHluv&e1zp{kE$>%O|E!s-_}l%X zMS@he$MM!CU3R*El%G)JaE!<;I|TI>LQ zBUaU9ud!T^8_?PD^e0+m4be$mY`R!`mew<=Ea7zs+a?3hqIQXV;j3*F*|0A?SgCNc zReUM137$Co-R{?~;3$*9>&1!k`5#q!`sw3sJ*&6?ZU^GW?@?FJ^X{1*$6g%0fTN+M ztYJKJamnm4Z*J<5J3QkToNOE4F5FFBF~H=kbbd3qJtAJZ72sh+Cu}Bu%KTin(zJs1 z8olFyV)>}wh}=&DWozoSdZD_&yn}kp|L1ODKS6)mR7%HZw*1z8gu)ZuNsy|-=p&*3 zuPvyCJOd6Chnno4V+*3rgXJbDqnCH zT4)VI*{C4^%EJTvp-#RYiXl)dpX^I@|5(mN4*FZIKWSI1+^TXLr%`fzd}FTlk*jgv z$%XCsuN2T!C0+#dqoKdpaCL>-OZByn3?uxv-J|>|d1Q=D?uP*1>2N$+j8hoC^CkU( z)GZ^ML*8(lN~dK3E_&v`DQ0!Si=s(B+=Gn&ftR_1bsFA=+t#zj_>?qR@^n3OhFdN0 znDaDkj@`n>=YW?Ih#wE$Mi^D*Yb_%<57qloDJz*D$d>u#?!p zSnZ~h|4ZwjvoelVnJgJ3N0@PtMWPcYoDM(yT=DHpeb!Sf69Fj(DMVAX<~MfS!KPfi zWi8K<*$?H#D_$}g`~J*Xr6-~zA4o%^iQMuZS~>0&$bLT1rPRU&)-TMjj~ZF7J=cXn zI{>Aj>Eu{yD00qmygBM*g?!)u!&jM%At((>r7cxl(7)PZWXTNUFqu+`kiw8dj7o%k z=JR$RMA&39MBV(yqH89(3jsQYCQH>oYQZH6m8zd#lW~~Dot&3e&-nT-3HFoH(}Q}d zFI64UB>8YB;sJ=&TN@Hw$sWXsw$bYRA%) zv?k`epNzlL>X9X^b4$-}Jfk;oKG0?BY9N4AyM&b^^8GREj&VN}3dv-*<C2af&@oae%I5#BjaSu6bVV%@BQYt9V>b2M;Pn#}My6p3UwhF&A 
zd!C0B?|{Wq-eYV_AbVYiYF``q+Pv%$cHD&B~rmC~8`{3nROh*+3Hz`DH8TWC1O zvmn5ke+Upp3RfnDqxZHOU03%DP*Ij)3;-ChMvI(-cKkPj|2bS;Zg{zN3Aept!$|L{ zMJ@Jn2N9$11H=o2e-7Y^3T-umwzPaAfn-Obg)nk$#1wzF)$voys-TU(jsL^IC7j<^qdP;gzlk^HX3f8} z;rTK~l+}59??3yztdo!*b0yPd9hDw0rCEGW#k$DLm`>3AYiBpAa@xasKvH7JI}r&y zQYh^iv|AsUf8r=rwe(Hx%LFYtf?@f{bxPlm?q;v1dz7W0Xt|k3(1+}%>&wE#U=TTg z!?HWpcXj5zU^~*-XB)y5Kg1DJ*@M&GyOfRu?BE>=B|x5VBnmbIR(P*MV#E#y-HKv* zXGdqp`t-`ppZZ#d{3$`21Bts!+^o#{&iH&Q{X6eXbx38as)wBl-&MTPvvKoIMwI>4 znbL&3zS(rd3_HRHV7&v^eRBqx4bww@uWq1nA7`O&aoMFWGfB=;{A*y)NBKXXziANY zID_?8Og}Quv;|W-bF`Ref1y${VO^G^SE!+HKL4F#4gU|-43^me;ji*~czHTk6gT_c4N-M;S{r zWLSePdFP8t^d-urU_DeFpnR2y*<*VsSgop$#8hqi6k$Q+#;)KTy1&li-cPMX>A|H=ea4Gr|T5Yf+-P`m`M=Q++ zfA8~Fr?@q1fJR5ic2JX0+7ljehvnpA=4}nC4P_mDg&Hv{i)$lVxCyfPFKJDX;_(x| z^V6Jll%j;*dEI655Dvj%o~O?GIU1t!ym2Rgav)j_0fpkv4&=waCpvFJ;d|JQ(@081Ji1( z?Bfr`RQ*_QIcr*auWm208ps(*nVYveS9(}BkRO9?vM~VKS?$&Y@psW`wPp9+z*yuA zTb{%zh7i?FW>;&Tmub79wk*x#^9ld{C@!Mbeofm?8q{@!@igpza46YR8A-vTCsWpk z{c>eII(Sk%lLzBTAT(u5prLWgPOW}yJ`vJvJ>N-LtpWbbYO;DeItExA*1PP{k)dYg z3MofIV1&sCwSDvM?tbGnr~Wt7Ac>#`U$%4ezD3%rS3}%7{?>NwsC;JxXPHek^U5K3 z?C$c#G3`__hzMf^)U|!*3~jgl)e@|$u%$SKns}t99>ZoR@}$TdsiXL0(qc*qV=cF+ zUfAVijzm`J{ITcnt3)p?lp-deyJEc8${0_C{$FJbOk78@_NT>wVtncVNIJvYRX}Zh?6wE2Ek) zhVNaWrRsNb)<)i=a-S!)s6S9JM1r@JSuYg*uumxwMCf&hNATD`p-qIJ%Eomzn(eUW zpRg6+5^K$~N7;DqV@)FwMglT=+cxtQ^XY^MBS?fb9v3i_Q`}yu(%gg9wRhWD)=A|2 zm=#ThoVLC3ZR5EIL_eRfU0+HfNM@CQ2*`j#^*tmbUr65DuJHHSmcRcv1XfMC$@;fZ z2mve$MQVI@ck7u?J@hfafGax#cfJJNI%Ue;Fph{`I5$(X^|cuEyLXd;q>Z_9)hlIs z<`a$N9=OJ8``b31Mf}H#dKA1KQ&AdSyJy>faH}mK?5YX^dc@TAfENEuJ$8Yqyp{Za zkS1D z8KO&zxFt*OnTZ@B^7QZ2lK7lkV8?|P9*av*jj-O*BlCHSswQO3H9X;wgEt>V&dFim zYRNc0xp$}Q^pUD?C#p;dx%*ast(=9b#ci0P&HTF`RXA6}9$9_x+ypT@vKk+cf3Efl zyxf2I5MTr_i{Gr`Z_m1$>B`C*8);v+>82q;lzLrzw!wYp)3%@N%9EWY&kUG-mldI< za?cuX(?<(|$PyOFhOv_O@2N8Cq^mhXnyW3MsS1>hnCji1y_GZxh-8|P3~8OOcQe$V zP+Tieg$yW-1_;Ht`1s(pgKo71#}-8!{2k^raStYpWUMr15fS;0Slnh+c|dxn}4iyzF5l6Yjdx$<_<0eJz5I zt|3yW>jA@sSc7fWok)NXxCy1ysXV%M8F3rxnCC{vg<%FkQ1giv+Ob2=kEJsC3cdkf z%{<+B#AAK?RlpSZuRo`JjZM$oby^r;iH=WY{nYvSQs8H=eJ{-mmx>cN;#S3_6%Ntq z9A1T#YPN=i+Y#{iE(7h&r+NA*aZ;8cDPtug%XK;To%ZrkSVdd@B_mqs@u#PL2G*!P zrKw-M>zl--54cbxE`d3TZdI2lH&-BPUa-zN2%PjsDN<=Ph`3Cw9y)aDbEV{QB zN|h5`R2axgp)JOzH_Di^b8?k>(9l+sxs}>&_R99<`p|7ixcg8thytkp>+KHGj`V?F z;HQ=mX%xDGnYvN*Cg+zChSH(#TGJs~L$w^)mUpW&L;iojLjF8AU}=vTQGQv!qDs)PlIc5yk0yp@SwN zB1)A;U@McJj!cjAK>ashK|C*yRD1%Bo~RBVEVaB*nf1j)Y9!21E-UKNkhyaFzyB5{5$p_F zE_Vel=%OhWfvn9)kNZ&FjKe83z(S*7nrI=0{xCh(n8`4Q$TpvW$Eo6y<5nyJN?iLY zlCTsXwnJpHTrVM+HHeK|JE*8-rl}7ly_Gd|Z3hyp(raB-43S1)uGJMY3s%>y%;J0) z<{!%x9S$eg8K~lnXqed!!jBhv1a>m<-A+RkmyV%mOdNdw6Ta+QHkq$)4Nr!T&AIEd zO)vrZPtpi!y^F;zCiAZa0q*-f^?je477&{AXB6Dqn1N#S&Wx$z*TZ%9+oYFndGd@H zJsR(&4?vrhhv$9V0F*AE$QY!q0hhM=1mGEJPATAv9#$(>Z^JQ5`pU1@Zxu84L@h?_ zan1_{zHD?un_w%QGmwpEr)}8iC1c|XWUSaKTc++PEW@@@o-J)pEb9&@uTTHD`9pc~ zi;FsUB3hh1=ksD>rSZppyULfJmEL{_dPQG051fjsYfaa>tQh59ay#zs+qQQP!hUMq z;wvXTLwV@xmfr>E;NS&XDpOC7-4etBy(Z3}7Pu2c&2Rf$=={<~6esmWg&`aYF$J!G z_h-|QG?KN7P_pbOrp21NktnO>KPq*=srQ8cwYxhnqX2&nczZ^d`e*8HefGI9loq;m zD?7Sc{){;ah%Q|zM``4;UH%hv>4a2Ph)n^{$m+k(X$36mLFc$umHP`_R-les5@d16 z>f-C&2Kwh6mrA}6_Ck7AaNNZDb6;P|-mdT<8?ur{+V))5C4^l-ccaB+ofNBT%c=YR z3k|V(f&=AyKWky$`=t6Ue@Z8ULf_K4l~O)FLfg9&Lmr@+<&4c-(m(XJuTKW~@;_la zbV#!4#egcclo?iVs*gQ9zbdkb^L}}0(CpNpO+=3Jv8$F0H}LX3%Zp!LKNzhlzxH;< zM!t)00}{|zfx?8J%gt|Ot#L*@*vaYHKhmc6ouIY)otb&~sX-5gvqhFSYE6HUq8a0v zp0f7gH~yI-;+EhI@#2*#{V;pOj!7uzr)5RtnrGK`Ar5lXJcfdyA{`rIH$wT+%8G|z z8pg8f;oyJSJ*UdD+}1~zBW4xRmPZv2;R(EWux`)wmcEw@@QiH#U7`!G{J{l*?Ak4SlLrK)?9IDJ?i|7N&A!RL+OK>kSnr;Nh*!GgBDd+8 
zN^`~C3^2dbJ5AX+%-h!Ax4eL##83wMrs4%aTFXqfW4`)ezv_f8(^ zrS8@&(3SSeWdpv`90a1|Y)g-OgM4xqnF3XdyU?p1wLb0-yM3>|zDj@9nQM{$phLcb18zuw3lva)mAo&orKBsBON+74 zP`FnG4*cagK1J;mf=Y1;0c6yQURN!h4dQJ5dJHg(5(6uE^>u~5tS-U5^F5#ajg6g8 zJui9+(~ux8>C^uQ#2ZLk57>0VA&ss?Zgr-Y!k4v(X02(p-a**v;BBX@Bm*@=j)-i| zcp9btufpsB=u54WeP!7#T=Fbmh6+Ev@^=y1@RriJ z@?{%>6qyK4n_pO*hMlr_W=zhq=Qu`$!)BUmxy_ou2S7VYqD>Iaoh!X;YIiRjV98%AB{ZdBK6os z;poSvQ`=wiK22oE0#bXWaU4iDW1h2_cXn$pCd=jnjXvN?Oipx;+eFcF9wsHhdU-|^ zrWtb`86SPqTz>cGjs*&oEP@lb6=Fc4yX?UXx4@>}#};1beocFORAhG?GXYRpt&g`~ z-i^x!k|XF?$arFjQ(p_AyVSl8W8C#8VX@kI^X|)8(t@yc$Qm;^Lt6LVqa7HB_Br;k z{VrOqF4E2Mb@jY&$g2H~O!5mOdA*p5&jr$;`iakz4XON`37C7hua*wsDmFq75aR+A zx;gPL3+)PFFb}0?GMRvr`!HXqfP#>qI+=2kDb6sOMQ73UNSVytG~Egist0D%3(8SI zI~Y-KD}`zaOao9J0P5koheviju=6skP!}ADpxkL800{N_4@Ua}1sRA*Lk##JkCRds zj7>IfUgz&6@#wqTft6l7EYWQKZq2=Sct8%}=n}j0&na;*OhQ_SNJvS^PMkezGsE)7 z*Iw7U!bIBgJ)>s1CbJU>dk(&^4^w=4PCGo@dO>!9JoxEQ(GFwVwv!b9)WCoy=qPKo z?(ZI3Ir$ihO$R|Kr{v4xi*wsz^Hd&RSMBcg-_0h8h~q1GV&-j`;f1c=X(LFQrAN3m zv1w2>9Q)7yjJ=bJ<-({Qb?QHPpk!ok9zsMwP6U8Mbc@#+H^--Pj~r6lVcJCN|JIz8 zkISl9XRc7K^wYjn#Qut9>Wc|Y#A+f{ww7<-CWSHP`tYo0E^R&>`Qo^gw?&bTYc4h! zisxvQ&v~6qfAOH|qsN1@!Lm0qg{CnjFbL-_GLG9eu=Ru;>ecv1J?1&Rr)Kj=+1?5B;_*dmcaj=y zbZc^dETF_G=}W>fKz>?j8M>3&2`{Dm33bbm3sq&f^MLM{ZfaZA1I716og90{L%q(; zA3c=r$XTZbAYfF3z9nbm@tpG;f-~K%o)?u_gQ!@(64u=C=S|pg??a1Qy9}UGeVmb%a50F2bhBCBm&``qi zP)?>Atl`@gzNaqXWSTL6fm^7I6V@-_=TQoglaxi5)vF z{3$N9kJaL}%UX0W%#(;C+>f7Z`4hF_Bo5>WKRosu+^R@t;cDiXOGU;h?E+QXcB0(q z^w=0?aN6z8gs+npI6gCPpE3Ekeo6l+tl6N|_M@(JPp{t~IGDf&7Uoo2y5o$`$h$%b z5fIn~IL@OvthI1|e_*530OxS7aGtP=l^~}nH{3KxdLIDIbbp)4;2LThTqAeplWnTO zUV`5MuuDXNey<=I0a6tW?lfy$uwbx8G<#qzV{fZ=`ej~T7|l~c=?Mc(?;8o{$FKdO z?*y8Kl1VX_v^bI(p5De$1@v$aQaAy{R)!uo7&z&^D*)gFhVs-E;ONNMl~|UAgl*Pf`9h zflcm&KK>uz<8e~wouX9Vx7vT7gm>}wf$S~QnWvAs-zfQ*s9OBsFnJ~;pj(fQMIS51 zYaZXuGoJ(z{8q2?tS(^FEgpT!EQRyvFroN%VqaNd=jn)k8-|QF6Z~8tza_&!#$q+0 z=H!Jf?NzhHLTTJe}@`Ut%M zW013o&HQ|JNKUo~7Fa8qv{7H*yzyWd&hkB8x&6&zkxUGM?Rf6?aD&28^r1tCjCKbw zEnX3@las%&(3P33o9};(BHxNZnFwosR~2#33dC7dEv2?NyWt7W+Pf!1f|6RYY)gACmN3v?s#b~ zTw<8l_wL?iPpTy~c|%h8meo|Qz8qd@uP5oawIO7Kdg>4Y&!d0a&&4>xq9okXM1B8e zRFFKq2N3k4!OomlWi97zx1q>*(C$nO^)q z{@I10HMglP8)M5h9LKHNT!)OwbuOjU5z6JLV^(4vHA6~kcA=y-%3U`~2k9~=3@4Yy zDV0i138@qn(fNJ8zyJ66JofqL{dr$rujgwId=Dvo+oX-ch0~)EdAdYDj4Ry4{DRSM z7a(Rtzr#unQk z$4KLewXX_)e^bT2(6d>o=aM@}8+<%hTVfk27;o%m(8%{RYUv0&YAe~Gr^?ab5LG!4 zoC$L~eB@ZEq$Gr77hsmt?UxEsoIkoCcb&G8tBr%T$A17v z%w0;nc1NEr`%9SdsaCIx6{-ufDEgBZZ*>kvjr%Z-rfO((vx=3ZCz#k;h(Iu!Dl{t) zxtb5HI~zm#Y|k#7US(wOMeD_VQMgapyyQH!IY5_^B|fjb1*N^%sgd~6mE^n0>-ipg zU+_PrBW8qpQQiid_xR`x{LL^uKJf_^61Esoc+J+5!mt%$R^JB>O%1|4OV;|Px9mA= zA#m&wcG?!e@l{aT$Kgjc5@;i(YBpRZjQ2P@qo6)B`Q@o!54$tbEjycM7*OZk=KTyL z$c}0dB)+G`qx0gF<`2vSNGO_=+j>#Tq7lssYK~eTu!Sx@!9vNA8=%~Qcf15yK%XY)Sy1mo4AA1g7S@*nn9qIyvRw7qgeWEJi8dnFgb7>@kn8^UbyC}S zI~jez+5*_!@_b;vYa3+M-O=9lx$*`0I4UFfoJ8C{$GSDH7e|Gsg;IugME_6+J z@m>2CK!h#SYj$3~oxsna-~iWhl?g7XvyN~Hs*lXX z_QH?&%v};U6xa;n7BsMz>Kn4ACjpz1*SwE&3|Y62-~f||LOlQ3Fx&%R@LGG5>=CVw zlFk&~f!_HmPY7GhN_xYdk1o`8Kcs;Def19qA2HVpeo*_ux+c$cl#8X>N&hpLbb%tY zvH*uJZT?zxaTr6)W~&d?IOz1emLckE&5DbG($^r6c?RqzLupkX1HgYA*3~-MNl$|N ze|0_=>$Hcvi`d*bJ&ps}Lf;VPuXcL)UoJA}y4z)5RML2)plGrXT(MOGupA!F%S_JI)Uoy-Mq7Wa@`^XEvy%SD^;m z(jZF-kLu;=L6YO*J|7Pl)is1L`LZQ!^ISrAg3$GI)it*T#HWRt_FH$~fNbEN8G);L z$W*X{oqa=3L)SS3OeSTbybvCvSB&3rSv#Wq-Ws0S`%9jl!94MT=8iKh>0&PdHo#1C zL2=)wZ2eo>^$iEY_#B4I>zqGDVyvxG^%x{0g!j=aNs@u*^9doQL^~h5_Y=lTlP`09 z2svkSFU2MpK6`0)$CvsC9o^1%5=SiHX=IVTr}e4oagaO&-~bkV-ZP)gGv1e4u*I^e zZ{F=&cgWGna38nd5heLi^j}Iy3cBhVwmW4L!aDR=pMWz=TFF!`)bDvwW^G 
zC1O;%pHGfrjGfAfYTNE!_uv^_wYrtJ@H2ky5?9-3tsZ2*j>=g0?Yp zfOB1b{Zxgf6+1DfxM|!kY1(#!f>ja{-M+o9J@NT?&L{MotcTnNio`TlIU7I58R?fj ztBP9HL0T~DaX-7w+R6yAH{M9BTd$Y22hIY;>LhwZ9HwEn{tqSJ;A_!l>-qE z(%%OIY$F%icc6^zE%pcj9X4Rw5aMRG9AA5(N}?(?Nmw6`A?FOBK#lI2bVi2`;um?& zK;|;FuP|XOVlo*)#FFoN%}Nw0k^jhoX1^JGe`(&_@W<*e&>0z@`z&YpTFdrzQGN%l zA1JRfTzwKTy|W@pVU(6-VrT6{y1kE4Fo2yn_|dO;jqZtk4YyC-Cy7ci?wMV%KfEJL z4I+dsFO;r=ALD6<7tkZCTA3B%bni88lEKHbnCtc(JhTbf0R|cinw9a*#sAZlWIk_b<*)jac2%m;seSRE7EjTxVgoMq*XmT zbwYJ}26xd1-2VegY(+6gW_8)q{C5!!#}%8`Oh_hXuD7szuQI5 zEmt?mnw~RVstzoELts=CL@mp07IM_So26}&}B+{!YiEeBNcb1u~>rq^RS{{Ow#Z0iF?i8!@$3|Nx%N(iy z%jg8NU-l*q@IqluHkOxU6*I`iXNN%s$mTc8;^YXC+}B7$XQ(sIN8YKKR4{h zvaHdQZ(uckp`NA7m;Q)s`QdTFkwP#ofz{}HYPPj(z5sy?u;2=PJnHCRz*O$7wl|MY zZrxe9)^gkP2LYr39dFLZjs?}{y5CZ6z0zB-4hcP6|7~V5@@&BIe_yQKS0Jb@>FFRH zBqWS5fG#8-(1^$>0C`N@r3g3OgeYNsw=M$V@uEWb zGt#+&62;afIlw19?iGPI%$zrcZ8)P%%n*#(OU?GpZ_0N_3>qp-Af*39*}In=#KJ?Tb~w$A+IwMS=)kGru;;7}F}>&~6pZWXI; zTLu5z9U#jUue~rcf8iu`uurM6)~^jLEIz-yGR~7S%y(&RIL?mPx1Mj7kRYGD${k3N zF{5~=gc`aH)?JrQq%F5EEY!m;-LPb))p`9XU*XMr2+kURjB~4sz2gPnuL{)27o7RC z83c#uwV3X7SIKDH$>PfrVzb^k5@~IkAp;oa41fMO6Tsgbm=L#-FwPJ7G1)@+cejy- zi*)(ih9J8oM7Cr~yyaM|*O;;FB|LYyntXWLece!8pCQI%FETAy8xhF%cuY@)W|2^p zFU^DhCFS4FKQRK(yWN)(4r=g8r%EZ2>m%<58lZR_mGi2@gw-WNlg%p91XodjsO7D4 zx+O7un!Eqd3QyoLGqpJB{0GDO$mPEzR~Q>mI8}(2MIsmYwyy~Isa@XaI^a(w z(s=4+%R&va;#Y$9FZu8T_!A(adZ;5w)%pkJ7PMc$MA-5Qw{^htNNebx4m8q%W%_cx z4^3#j#>kGP!2+L{nKNZJssSRA;L4L*t-st}xO ztP24};$Md3uP;PDXbgB2-DAopUr+%#-;Eb8dF8$`IrdunwbSE!E#uaIaDCqfNGN|& zv{{au6n^{vu03u-I!zKg9z`B_Z{56y&6GSBrrchxCARFp(=fIzvxDK4>hPZXA^ZDE z%C3SylYeyD8MaxVFr66z`v2or$EF-<#l&kxBdOulVs;SDK&Gx(xi_EvuDjPb|z*?SS1R5nXoNs5K_nsHLHdHjn>N&b6k%lY| zvlGG=DxLY>AStniAF`ceDb}>0fq1B}Fi`TS(AM>&OfAc0l~D}Zh^-YHc`zoWhCo^Z z4R>Y12p*7;)bB*5LC<*1#~>0%)KGn&{`KgXMEBR3+cwdj{JBm4B7ooB4OoWT>Y6wyDFA7i=Hus#|w>df4 zKLbPkf+MffKqE`j-aj4lieP#O`n#<8iMY!FYY2|W)R6TddLcF71$!7`r*{MAm?Mmr z9~^A^8F$O_Ll!scmQ8QYDzSGncwLZ`|FM7hN0}i*-1A_|K9Ff7s^CnmByaAR{kER~ zMYrK5IJrx?{&{Zawq3w!zo2*1I`FPh68gl}gC!4p8KDYou9P?aZPM8}u(8p$l62&~ z;<^7MxQMyDCWJ2DQ^{V1{Oay$mqJAQ`nWI@gTz zS%|wyMo(Y!6S6C${;wFlp1%c-6be9r0i_#48kh9IAH}#FN5Xo*rfEjxvJ`%t z4Y92x2)3oQ^-Rqay`>yZqCK7dVf1nAyXy%eK20BQ9|DB?V05+q`lJo?5Qlh&ralJD zwVcNAE8+!A5YNa^_HQ84lF{{gT&Ys*GFNnE_1)YpJ6U-JT!VqNqz?d2HcU2GB=_C{ zprk+RZLpYML{%$Ubt5+#+4qq&7#a4m<=V|?Lgyg|>(1S2ju8qvoFfL)Nw!q?ojl}! 
zYTICAogSF-%bO&m`O$Dku+=ghJ%R3Cx;1SuRiQ!GZhX&Vh37OQ9Xysb*Fe}z+0~j5 z@h1jcgP_gIB~Npn{`lbHwnpEg`^Y;LcF=SCX?E2Od()v@xm?qD5w5M3p=VELrz1GB zk8HF)u{e@ozsJZ3L@!k9$4Qe{6Z)P-D_>NBP6RB7H=I))-Ojr;UeE5>3KM>qk!IgI zauC(Iqa-Q~cIv(yXDkS5>I!(rwl`tDsG=$%UaD)=tbm!YPn+Wp=$wlXDzfOX@?GQH zcggS8U?cN3zdD(ir6il&p4ku9e~*im zIV}K3;TIO&GhK7jZuh3bH+3m}QH=-}(J2 zV4WYv*G8z@Qe5BhzGps(iQzpI>t7)tM%0?8O)fCfJf`9*?@MrHVVr4WMuqTTHF=0V zu4sl}!GJ_S0dimz4BWTs0UjGOq^NbCt;8t1?6Tg9Rs+sOf$G>I-Vyg8(1f*#c%i2n zquQ=BxSPSC0OXNShw8ryOoF9r zQv!obsLDCEdplCn?-Y;PSQeMiX%s{z-l2|{#YC)2B3KCv9Yjg+jMt^Pq=%10#q3ss zQWZdPR&!pQ)LUSNUfQK@}ppfER$O#EQvKNb?_7bvnW&X*w{ zsOkU|$X}bC*%`(&^NkqBeQrAOTy6hkT6a>x(p>ZD_m#d`#msZ)6f#jRq9d&{>S~nR z*ohs0@Kog=_s}DIxptqA`+c&Y+l)#^zl`#t>fW3d5P|CGLq>;;T$he+-_#v>67WGf z5DB?6i@fmj=GM$l-s7>89-W7|J^`Vp_e4P$par7-bu-f_3Tr85_Kyj@`S=V_Sz{K=tMoIlmBUz^_od~xBcOwn7s|%huUqrF+CXj zy_2y-1ne*U?UpA*Z;%+H9e;T5XdLwV7;^qmGHiFWhWs|TLMB3li+SPNuPlnwO)+Ei z0oV9PwVd|NS*92}JU-nY&UaP(2mJG>WDkjc=69qVnm3U+azHg-6&u)eIxdpYp(25j zPa#Mutt5%x|IN_V(jA8=yOZy9mWNp|lg?j%psE;T(3VE(Z{sf!!kPsD5G&#;#`a1A z?GhsYG(zcMSIs;YvI{m7bpA1D{iD<%1Sc6u;4n|a&mFp3@#X|OdP{CIh`BpBk*FBa z5d9&N6V|+AO7ew{nq!9k-hw)&eqVak{LCWzP4T78pyLb!lPSQD*u6XJF6=zCehex3 z3r2w(=+Hj!=B5_crH0>kgDfknS)qeHGoy!`J$iCk36g)AIOik%jyKNu%7vqnau&inC7z?=J=MM`jjn2kyL7MRtcPr65i>23N;~%7c_P$z|M-|E&C(?PS zmsbpO>{fLeF+5(&ms>SU;nmicNP#56I8b1EV{H7u)M99~Swh6#z||E%8p|E>)$ONq z*}C6%Pxl+Zp-|8uBxR|{?zcwfE|3grlf{Pe`ELJ#!*l*sXjM;Ng63^+EXtAlT^_1( z@K`-(>oD& zDcbisDG`3Xf))z`7JwzqwUNlqUfcE))=$XpAkge?!A_latEF>1G`-c13AGFMBPrgt ze8)E&lVG>}GKuAap9XLXzV0>)#}DH4K38;YNUEaYmL7^-haE%y4C7mG!M6`DRKX|9 zg2(jqgNuPre(_tn{K_1&lc2$_>cXNUAwI!aqCg9>TZMTm3JCz9XPkjCvx%~>oMCj> zBffq$`n(H!E$r%15K~6R&-tVLsxkP*i?4R;dmb=m4os*g)I$J(*TP&BH-aHKlS6FO z`TD#jHzfNiXJ`mc8ZUDyV>3hWQxKJV^0rte?_o1)et+*|q33p6Xl#aa!jRuh^kr!5 zH|JQr7)^bR)RoJCRH$(pwal!o0+0-G{aIv;vbvj^)Ou{(SjuJk+Ce6(t-wMCI8FI& z54vAbNt#Ckx5GnlxrO$f2fGL~kn{nB?x3kO;H$}4a?xt5B=DN~R9v>-cN4j>HqE?j z|F}z8G6@{Oke`xcY+X6;Y|J~?yxo!XRu5CTkBBc?T={(mZPH}*NZjFQOXd1v&FI&f zb3qOP)?rEG<{VZ))5Mzy5k$qSYI16N(KENEhXmdxa^`zfnyraNJyPG$ZBPbn;t=oy zS+1^#6{hQkneK;QgG+J;{{)kacXAH-6@}661WXy7_|5eADQ*Y6agwCJU-ja=4zix? 
zsJ#oPA!Q664IE-Sxdit3)Y;q#-ijf^4`Nm7@}%g>(4WB@&o|V{6R&y>ro3>)g>SEN z+idU1Wa>^1FBhpB0;}k8lWA_8@|qC!wI(9zDo3f*%$b-nuUQrl!1l_(OI15@)a3@% z=HjV`iQCH1r;eLD%5tSHleLJYk@X68O}fd$2s?YP`2kPE!KO=lHxjgj1{iQ-H2Bw1 z^GBE7M4nAxY=Umh!VK2p=5qYbp8I#h?t6RnMqeBfY@y)wCkr>)hf7OwFh#3!Mx9rf ze#qiQ#_}5;G?Pef2|1E-^hUSXk|5kuD0aL(Zcs}$<8vbK*6bsdSxO%H3mVSY2>L=` ze{tl)m^SLo47jI9hd7&;0a`58ja(a>RDCI0RN!7feEG#$QdCly(_9aCP=${`db_nu zKbWSRAnBRHbGmgpVDzuOAN#GIS0_Ag^H=U!JjOeG;A45fX(Oq@8z7dUS22!@ch8&+ zolTN|AV_+FdHMn_@!GncGsJceE^4ZLw#YVMR}o$n85az2*t zD#2B3guo!zA@q(}sl&%-Hv=y0e1uKdlZbuVz@ljczWUx5KU~f!F^gp-E;9A%0fnQZ zwu5TB;_3S>=+QgQWrRB&V>XYww%!^zZouo(dXNJxpS zn_)hAMcYygGl<+@0LyqUKjd-K>e2d>^sRI{Cs&2PrQcoMURJ%kQ<5KQ;`J)cF>^xy zs^8>h|8YV4Ht05mHZTr&wDo?i-yCh>h3UnK81OXx;o+y%A}eF*&a+rX1b!{>)BR}4 zmQ8~8kuoEV-Zm*BX2QsiY&I5^f0@ZLGE477iWXnj?+`;fkEF_NjQ=rmDpw|czA~!~ zeGFWW|3~Y@3IMVb@^^1a;5idBt{#vuo0v^?kO)AFRhCpe@O$yRx-k#-NKpDNIRyQD z9EX%KbiB|;RNcMP471`wJ%xQT6bEtFrgku9`b|I6UkejL?MY!ib!2(q;{VW(NuDm=x&AS65hz}I#CzamlcLMR)GE7cvoVwJ7sitop_h-7lgKR1a2$a15 zC6ac3-#^kEkABZ)8=M7^g4I3lt1XCb;r^th&~uRQU7%qD(7b2vfr<+wk-Jq!L(z-(5s&c zd=Z+51^)AEm}ChQt53@OgUF>-hX!k?)hFEaeDLKwZ)WyN{k`8F2^hF=nD}vOna%a^ zH@$XOL}xLcG2E`W{XoO@V{v|-bIfyXU-zwljnI*y^M<4-`P{a$#^^=ygAQP*wx ze#Hz3jhfo&Tzw!bvz7GxSg++i;pM<}%%|88g4ln+Wxl4v3Vj+u$y#K1{=8PZi~4F% zx(AFN6GJeusC0Mz?Rl2R)d9y_3)g^xv0wWU(#pChHWHAhQ<{?ldqN+L3n=;Z2O$6Wyqb zsQnYU^d&T~{p*AL5eo44wtevm4SZ>SgU9$^lY=Fj>P*WC<_wNvIXp3nbpi=c92Ap^ zJl@>1wi@-}vo)HA(O7=FG(Xg=t~TxC5KN>qs0q}A`u~VN`^>h3F?pc}r+86ffRt)0 z$684S`aZo&5jj3rN38RV{K?1gTxsUlhF!vm|?x5s9H$Ms>U<$h90v3W6c6yid_j;<|abLLGG>=Zzk$(0tp2O`d~}y5$9KH&lvl|Q~BdaC@5|2 zos{$kJXyE#!}I3#;`Vy)A8!bJZ+_McKcwR4!DI|ZlpYgn=JgciB&GcaST+a&oOWdI zTc;KwN+0Vs-xUxUXp7onKRyerS-@(DBF>C|)f2m@C_^m*6zJ1bOiU~#7IjyRArEi; z-DH>7{H(%dgoU*&3=~4JlUKnF3&i7J*Bw`*drvVLPlK7<-a+m=M737erUv-vTetn} z=th7>LUP~JySAUu`Nt`zMV{C=&3W1KHU*QbeK6{oK~WrNZ$3!&cscT~*vA1(k3t%L z2=Sb}xrfYc(0fGYf96Be9U&N+YI16^aOQx2-{^_xjOIK!52E~ebKpt$-eODn>DV@v zW@l=W(FWgF5dbiMHRalcncc6|Z5tPG#)H+)?Z)B?eI6Pzq<4tdIb$jfAr13kHIyzZ z`*hI$!saTHy$~1P@==tjo=W);&8%5V>43lCl}Hu?U#kcF0w|3D@R_RQmF=@29n$8T zE5Uohxl%>(0`RT32AVJlp2Ku;!%a`m+*|`W7fka!Z_zWJ8{&NU;6>#o`b7a^douZZ zRt@aNbh0i?sNP*f`Ux!tv9W1PxmWYNxFyum~r*bdsM^>5SL~dDNm_z|a zdJN)10Y;az=t;jT!c7JiU=ZHuoRbnvZ3fhpja41BI9Z{6$aY(h(d|X6rs$KMVg%!x?^8|ACpNUS@qP+u-kibd!QN_ZGGw?KS%wP8)qS-yaEu=4)t^5a6 zCkL}!aigW1Os^JrTs{WooW|k|G7)Y9FzZzBhJk^d$#JRz0(1KjUgLBAwoxr#TNi%F zqgm3L9#QAYrw^>c%@fCtTx_?@EHMi1>OqA((AEKX*-k%sddCh#PmI12Tr#T4QPnMH zH)MJ-P-BK;IX_Ld&%z%)`|~wY=$}ALU(t^#n%VwaeTgh8HMaPyPeA2AyYpimpTO>B zVG)G}moB8UJTswfsy0~T&k{^YWy>nJV=??T;zz@779IuQT_co|mOe7+0|slRDGI3i z5dRDQbhWqJ30%7!*kS6VE*TDcYRXPqoW)&#?G0!#R5212o+`C0NK6>3%^d`m^n=T< zY&iGr+~ag32C(rElKcJNU!ANj&ud%6G}~Udj}s>3jt-yv2oW5kKa)BrmlQ#$_+%h- z`v#x;0BB`G;1u&scQraQ1gsKE9^%aUp17m9XkA;X(%U#GoUk8^U#P9JFkl(l4CoT+ z_@jg9=cr5t=%_6$r`ZA0gNim{(ga+(aA|2C?+?y=?lX8jz1B^BY-k1L* z;^G(CEpdzIu4lU`&&CL|i0+$WaSo&R;+Hw|}ed8=*7 z@LzdFG8pZlse)^98TD5bq$uEgzZ|f48i9Od*_@iH9NqTIx#Sm97Vr2-ExNl`6(s*Y z)%VZUeNPs>aEY)A@SrhLc=CJ$1jK)!Aj?P1yQ@wcgdN{mKwPh5dMw?`ZK3Mlzkacq zwr5!f?99mNjjf6uPYXUgA>@WT-Ig-<#J4eD=bDvHXr>k;ICWa*kMDej z`YD71P?=dXs7m4XR(Dwt4lRQ#yIeHiqqQ<^Ks!WTOu$KlD8K=lPI2$JtrC&Iu<9OvhWa3eicbULo4@Fbyac6Km1~3UI5$BFb)N%+NwI-Cd$O3 zv31Fqcbr*Pg=tYrAX_$|uU#ynK%L41k~Oj%wxNhxS_Q+*iFIq%#2wbJ_V;pDS1b)V zVpr~{X~Q$! 
zyMK?Axre-otOLa&~G_>0x$fpu|LO|ElTxj-UqK9wQ0V7rWFw+X{HK8bdyO`)d9*|P1}^~uloaQl-=B&dXuYC zUc|#Khy;EglPi~eaCqFJt$h!>WRx`EPRx~`^p0;u!MutPNJ2D*v;wCgtU#~ ztu&l`!VFXeq&74KyuYliL!U$JsEDFyXl=)JXH3y5_m zK=!WsRH;p~=Ntcrqx`L~969Q&>d5_1^(EMrI=D9c6%$Y$eN1amw;F0cjWEtn#HFWY z-$~L6EW#x2)kAAxL$eWeQ_`eXlK-VMl3Zy&hD=WW2x`}JpFKKnbb9?Bi2p8QI&CTE zakVea0!ZKTA7I#{=kR!DH10HE00xr=2Wuq!OzdoU_LdvloxDO}%}(_Ae8MCMIvRIf z8WF_E?ZH$kM-=NlV%Q`)vNJV$hg=NX-{H+kPZRhZu0B5fDc$hi%L zhb^u?T)A+Ojdm1NA~KwEb4SyxAMD1!0V4;gKB?#(efiW@$6{5PcTKA0DUELlD{1>7G5vsDYBy>pP)aDFr0Wz1qp>P#fLA z!-w#rT1*{;*lBs@(s0F^Pg*qaIDwy0R9!9#Umg76Ul@4El+{CQwD;pIxIFKC-2x%rfHQ{C;6Ph zP$soXEqu*4&6P>~Lycu3>cnSmzm*fCTg=c@TTwd`ufU&f&QB@Q{Gp? z->c1I8(1?g`(ezln1$k5`NYIrR2rb{+GIAO)z@wBqTe};WnJ3C zeN-BHsKSnYuq(3ENC(;+t?gnQIdPKyx}Xks^-8Q=>>fg53w}l-dHQ1fzCGo>D+_0~ zyl%d2Urvu@)6HMj*fd+ufmQgTV=?|w_d#m+#T#_3WKvsxUUg(xZtR{N?kIQP-&)e< zPQM6=^CB6)ca2>2T>EEd(_iUx<>PTjeMTZ_2Z^a2b{8_uFQhx7T#Wn{uHHKMdY=>))1D;AcKFU&lhI1~BC zPGpjM4W`fME?5fQFdz3Bq0+b#dHl=5^~4;aO6Jo-}du{bK7xE2-1WBD&&gm`ygpsTHp3x?c2i2&w|39 zG?waBuuoggwlP64;11D{mLj9l7)CZ)@SK~Oh?&PXU0jb3^~BhBZ^YpwqL(6<;ak&d zrq8BTP>4HDwN41Ca|r1WH%Px})fc!oLv80T1m#c0Qd(D(QJtX{Bl-k=fPozdb}=h) zIAbk5?&bNMOgc2#GCUP9{nR9JB+UzVsTJ-eZD%?j7Q22Od{mQY2gdZ}hkw!XQo7$5 z+#PsI^+ZX?+LI3P+{jgu=4aK_r~dBoi8U1Hn!V7>cm;f9S`$r*kp1PwS{MvSIDK5k zwQ-(+nrxiy6T)UzJWA`X@AP2gHJSPd>+pkM2H?_E|H0L*ky8I}(v5D(U8i ze0tEk6Fn4e^Mphw;`1gIrI!fy^z^ld0RUGXOMS%4{&z~tGB$6bmqH|unq|()w}!=E z^o5|t-Adn-Cai#*;+QX&9-5UPO>dVLrb8KE8*TRLyg;2;-QL?X%Na;xBPU}?T z3L)>kE_QIJ+#mu(QG7Ojh%=p8<*YIm6`yMxH%fX2#v%kOVTG}qkUY7*7lVQF`z>bp zU3U_sjrc-=G1Euqu@lT(Rw$58(b(`7fYa+CI43wPc0WbY@&gQ)9;P9iG<^4^VF6^2 zv{c4WmixR*9#J~{46*>!3RDpGH6P={o19_E55mLS+$kg!q<0?8Pjch*W1#|Fy3#x# z2BJO*l#*-Fyuxfj#V-Pu*8-Nz1vz93x@AcoXq1>s(hcBcE4>0}CDZQ_c$=w4yA)Udo4TNclvJ_J_?q{2=IWP_P>(N zLq=S17&=mHD6&6lt(9(2&}MbU>|ZM|I(nU+WM(n6gnzsqHv#JS4Ifp-K~$cB;6^S@ z=UW$G<>>NOta)^*0E{GT1-FCxZ-U32Wbdf0_~{sEh_<*Md^_I{HU{>ZvJ+xs^%XQJ z9sFD2qdy}c{++Ic!e;^(y}RKs+6pya_eVp>Juh#=GwTKR^w6Vv|FjS7tARVGaA$W? 
zK|#}T951KqS8;2PdG%_mb&BNd@y(Kdp4FfD;V7-0zkK$#)77UN)4*#zY;36`p=_t8hEXyXhi;hKZEnN^%XblKcvTgh=Kq zK!VT_^tnBxJRQhj_;e6WKP6N@_Na9lVn~fCMF^a>H0G9&4x9P9!ZSQesN)H9{_(4> zfBNTpZb%f;k=jN|tNI}cup~{$zUygIV^I*&lAg9^Bxm`ZPDCu=r#>Se7f^twh_(gi zi$dd>0LZ*&2>>JC1lYzR`d|$OQj~hs1psrqIYBR2O2o{TMpT7tI(c*u1J;3atT!h- zp4W3kK()1(#AA!B!l)tu%k$c?$oxYrWTc%v* zFi`M#(og67?#$kAu);v2Ecj3K?A@2u02ZF^H^k1#$teuf_gM^2zh6PSIPM|khj@eh fdnbsBv;+y%0#&j_Z^@Dzu+|6\n" + ] + }, + { + "data": { + "text/plain": [ + "[Resize(size=(384, 384), interpolation=bicubic, max_size=None, antialias=True),\n", + " Lambda(),\n", + " ToTensor(),\n", + " Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "print(type(image_processor.transforms[0]))\n", + "image_processor.transforms" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[217, 212, 211, 213, 213, 210, 210, 210, 213, 214],\n", + " [213, 211, 212, 212, 209, 212, 211, 210, 210, 211],\n", + " [213, 211, 211, 212, 210, 213, 212, 211, 210, 210],\n", + " [215, 211, 209, 212, 212, 211, 210, 210, 210, 210],\n", + " [211, 208, 209, 211, 210, 211, 211, 211, 211, 211]], dtype=uint8)" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import numpy as np\n", + "np.asarray(image_processor.transforms[0](image_2))[:5, :10, 0]" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "def cubic_interpolate(p, x):\n", + " return (\n", + " p[1] +\n", + " 0.5 * x * (p[2] - p[0] + \n", + " x * (2.0 * p[0] - 5.0 * p[1] + 4.0 * p[2] - p[3] + \n", + " x * (3.0 * (p[1] - p[2]) + p[3] - p[0])))\n", + " )\n", + "\n", + "def bicubic_interpolate(p, x, y):\n", + " arr = np.array([cubic_interpolate(p[i], y) for i in range(4)])\n", + " return cubic_interpolate(arr, x)\n", + "\n", + "def resize_bicubic_pil(image, new_width, new_height):\n", + " # Convert the PIL image to a NumPy array\n", + " image_np = np.array(image)\n", + " \n", + " height, width, channels = image_np.shape\n", + " resized_image = np.zeros((new_height, new_width, channels))\n", + "\n", + " x_ratio = width / new_width\n", + " y_ratio = height / new_height\n", + "\n", + " for i in range(new_height):\n", + " for j in range(new_width):\n", + " x = j * x_ratio\n", + " y = i * y_ratio\n", + "\n", + " x_int = int(x)\n", + " y_int = int(y)\n", + "\n", + " x_diff = x - x_int\n", + " y_diff = y - y_int\n", + "\n", + " p = np.zeros((4, 4, channels))\n", + "\n", + " for m in range(-1, 3):\n", + " for n in range(-1, 3):\n", + " xm = min(max(x_int + m, 0), width - 1)\n", + " yn = min(max(y_int + n, 0), height - 1)\n", + " p[m + 1, n + 1] = image_np[yn, xm]\n", + "\n", + " for c in range(channels):\n", + " resized_image[i, j, c] = bicubic_interpolate(p[:, :, c], x_diff, y_diff)\n", + "\n", + " # Convert the NumPy array back to a PIL image\n", + " resized_image = np.clip(resized_image, 0, 255).astype(np.uint8)\n", + " return Image.fromarray(resized_image)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[222, 217, 214, 216, 218, 213, 212, 214, 216, 218],\n", + " [213, 209, 209, 211, 209, 209, 209, 209, 208, 210],\n", + " [212, 210, 211, 212, 209, 213, 212, 209, 209, 210],\n", + " [217, 212, 211, 212, 212, 212, 211, 210, 210, 211],\n", + " [212, 208, 208, 210, 210, 210, 
211, 211, 211, 210]], dtype=uint8)" + ] + }, + "execution_count": 23, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "res = resize_bicubic_pil(image_2, base_img_size, base_img_size)\n", + "np.asarray(res)[:5, :10, 0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Model surgery" + ] + }, { "cell_type": "code", "execution_count": 3, diff --git a/examples/xgenmm/test_anyres_img.cpp b/examples/xgenmm/test_anyres_img.cpp new file mode 100644 index 000000000..51bf6b5c4 --- /dev/null +++ b/examples/xgenmm/test_anyres_img.cpp @@ -0,0 +1,530 @@ +#include "ggml.h" +#include "common.h" +#include "clip.h" +#include "xgenmm.h" +#include "llama.h" + +#include +#include +#include + + + +struct clip_image_u8 +{ + int nx; + int ny; + + std::vector buf; +}; + +struct clip_image_f32 +{ + int nx; + int ny; + + std::vector buf; +}; + +inline int clip(int x, int lower, int upper) { return std::max(lower, std::min(x, upper)); } + +static bool bicubic_resize(const clip_image_u8& img, clip_image_u8& dst, int target_width, int target_height) +{ + const int nx = img.nx; + const int ny = img.ny; + + dst.nx = target_width; + dst.ny = target_height; + dst.buf.resize(3 * target_width * target_height); + + float Cc; + float C[5]; + float d0, d2, d3, a0, a1, a2, a3; + int i, j, k, jj; + int x, y; + float dx, dy; + float tx, ty; + + tx = (float)nx / (float)target_width; + ty = (float)ny / (float)target_height; + + // Bicubic interpolation; adapted from ViT.cpp, inspired from : + // -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36 + // -> https://en.wikipedia.org/wiki/Bicubic_interpolation + + for (i = 0; i < target_height; i++) + { + for (j = 0; j < target_width; j++) + { + x = (int)(tx * j); + y = (int)(ty * i); + + dx = tx * j - x; + dy = ty * i - y; + + for (k = 0; k < 3; k++) + { + for (jj = 0; jj <= 3; jj++) + { + d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - + img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k]; + d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - + img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k]; + d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - + img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k]; + a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k]; + + a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3; + a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2; + a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3; + + C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx; + + d0 = C[0] - C[1]; + d2 = C[2] - C[1]; + d3 = C[3] - C[1]; + a0 = C[1]; + a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3; + a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2; + a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3; + Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy; + + const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f); + dst.buf[(i * target_width + j) * 3 + k] = float(Cc2); + } + } + } + } + + return true; +} + +enum projector_type +{ + PROJECTOR_TYPE_MLP, + PROJECTOR_TYPE_MLP_NORM, + PROJECTOR_TYPE_LDP, + PROJECTOR_TYPE_LDPV2, + PROJECTOR_TYPE_RESAMPLER, + PROJECTOR_TYPE_UNKNOWN, +}; + +static std::map PROJECTOR_TYPE_NAMES = { + {PROJECTOR_TYPE_MLP, "mlp"}, + {PROJECTOR_TYPE_LDP, "ldp"}, + {PROJECTOR_TYPE_LDPV2, "ldpv2"}, + {PROJECTOR_TYPE_RESAMPLER, "resampler"}, +}; + + + +struct clip_hparams +{ + int32_t image_size; + int32_t patch_size; + 
int32_t hidden_size; + int32_t n_intermediate; + int32_t projection_dim; + int32_t n_head; + int32_t n_layer; + + float eps; + + char mm_patch_merge_type[32] = "flat"; // spatial_unpad or flat (default) + + int32_t image_grid_pinpoints[32]; + int32_t image_crop_resolution; +}; + +struct clip_layer +{ + // attention + struct ggml_tensor* k_w; + struct ggml_tensor* k_b; + struct ggml_tensor* q_w; + struct ggml_tensor* q_b; + struct ggml_tensor* v_w; + struct ggml_tensor* v_b; + + struct ggml_tensor* o_w; + struct ggml_tensor* o_b; + + // layernorm 1 + struct ggml_tensor* ln_1_w; + struct ggml_tensor* ln_1_b; + + // ff + struct ggml_tensor* ff_i_w; + struct ggml_tensor* ff_i_b; + + struct ggml_tensor* ff_o_w; + struct ggml_tensor* ff_o_b; + + // layernorm 2 + struct ggml_tensor* ln_2_w; + struct ggml_tensor* ln_2_b; +}; + +struct clip_vision_model +{ + struct clip_hparams hparams; + + // embeddings + struct ggml_tensor* class_embedding; + struct ggml_tensor* patch_embeddings; + struct ggml_tensor* patch_bias; + struct ggml_tensor* position_embeddings; + + struct ggml_tensor* pre_ln_w; + struct ggml_tensor* pre_ln_b; + + std::vector layers; + + struct ggml_tensor* post_ln_w; + struct ggml_tensor* post_ln_b; + + struct ggml_tensor* projection; + + // LLaVA projection + struct ggml_tensor* mm_0_w = NULL; + struct ggml_tensor* mm_0_b = NULL; + struct ggml_tensor* mm_2_w = NULL; + struct ggml_tensor* mm_2_b = NULL; + + struct ggml_tensor* image_newline = NULL; + + // Yi type models with mlp+normalization projection + struct ggml_tensor* mm_1_w = NULL; // Yi type models have 0, 1, 3, 4 + struct ggml_tensor* mm_1_b = NULL; + struct ggml_tensor* mm_3_w = NULL; + struct ggml_tensor* mm_3_b = NULL; + struct ggml_tensor* mm_4_w = NULL; + struct ggml_tensor* mm_4_b = NULL; + + // MobileVLM projection + struct ggml_tensor* mm_model_mlp_1_w; + struct ggml_tensor* mm_model_mlp_1_b; + struct ggml_tensor* mm_model_mlp_3_w; + struct ggml_tensor* mm_model_mlp_3_b; + struct ggml_tensor* mm_model_block_1_block_0_0_w; + struct ggml_tensor* mm_model_block_1_block_0_1_w; + struct ggml_tensor* mm_model_block_1_block_0_1_b; + struct ggml_tensor* mm_model_block_1_block_1_fc1_w; + struct ggml_tensor* mm_model_block_1_block_1_fc1_b; + struct ggml_tensor* mm_model_block_1_block_1_fc2_w; + struct ggml_tensor* mm_model_block_1_block_1_fc2_b; + struct ggml_tensor* mm_model_block_1_block_2_0_w; + struct ggml_tensor* mm_model_block_1_block_2_1_w; + struct ggml_tensor* mm_model_block_1_block_2_1_b; + struct ggml_tensor* mm_model_block_2_block_0_0_w; + struct ggml_tensor* mm_model_block_2_block_0_1_w; + struct ggml_tensor* mm_model_block_2_block_0_1_b; + struct ggml_tensor* mm_model_block_2_block_1_fc1_w; + struct ggml_tensor* mm_model_block_2_block_1_fc1_b; + struct ggml_tensor* mm_model_block_2_block_1_fc2_w; + struct ggml_tensor* mm_model_block_2_block_1_fc2_b; + struct ggml_tensor* mm_model_block_2_block_2_0_w; + struct ggml_tensor* mm_model_block_2_block_2_1_w; + struct ggml_tensor* mm_model_block_2_block_2_1_b; + + // MobileVLM_V2 projection + struct ggml_tensor* mm_model_mlp_0_w; + struct ggml_tensor* mm_model_mlp_0_b; + struct ggml_tensor* mm_model_mlp_2_w; + struct ggml_tensor* mm_model_mlp_2_b; + struct ggml_tensor* mm_model_peg_0_w; + struct ggml_tensor* mm_model_peg_0_b; + + // MINICPMV projection + struct ggml_tensor* mm_model_pos_embed_k; + struct ggml_tensor* mm_model_query; + struct ggml_tensor* mm_model_proj; + struct ggml_tensor* mm_model_kv_proj; + struct ggml_tensor* mm_model_attn_q_w; + struct ggml_tensor* 
mm_model_attn_q_b; + struct ggml_tensor* mm_model_attn_k_w; + struct ggml_tensor* mm_model_attn_k_b; + struct ggml_tensor* mm_model_attn_v_w; + struct ggml_tensor* mm_model_attn_v_b; + struct ggml_tensor* mm_model_attn_o_w; + struct ggml_tensor* mm_model_attn_o_b; + struct ggml_tensor* mm_model_ln_q_w; + struct ggml_tensor* mm_model_ln_q_b; + struct ggml_tensor* mm_model_ln_kv_w; + struct ggml_tensor* mm_model_ln_kv_b; + struct ggml_tensor* mm_model_ln_post_w; + struct ggml_tensor* mm_model_ln_post_b; +}; + +struct clip_ctx { + bool has_text_encoder = false; + bool has_vision_encoder = false; + bool has_llava_projector = false; + bool has_minicpmv_projector = false; + int minicpmv_version = 2; + + struct clip_vision_model vision_model; + projector_type proj_type = PROJECTOR_TYPE_MLP; + + float image_mean[3]; + float image_std[3]; + bool use_gelu = false; + int32_t ftype = 1; + + bool has_class_embedding = true; + bool has_pre_norm = true; + bool has_post_norm = false; + bool has_patch_bias = false; + + struct gguf_context * ctx_gguf; + struct ggml_context * ctx_data; + + std::vector buf_compute_meta; + + // memory buffers to evaluate the model + ggml_backend_buffer_t params_buffer = NULL; + + ggml_backend_t backend = NULL; + ggml_gallocr_t compute_alloc = NULL; + + struct clip_image_size * load_image_size; +}; + +static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long* sizeOut) +{ + auto file = fopen(path, "rb"); + if (file == NULL) + { + LOG_TEE("%s: can't read file %s\n", __func__, path); + return false; + } + + fseek(file, 0, SEEK_END); + auto fileSize = ftell(file); + fseek(file, 0, SEEK_SET); + + auto buffer = (unsigned char*)malloc(fileSize); // Allocate memory to hold the file data + if (buffer == NULL) + { + LOG_TEE("%s: failed to alloc %ld bytes for file %s\n", __func__, fileSize, path); + perror("Memory allocation error"); + fclose(file); + return false; + } + errno = 0; + size_t ret = fread(buffer, 1, fileSize, file); // Read the file into the buffer + if (ferror(file)) + { + die_fmt("read error: %s", strerror(errno)); + } + if (ret != (size_t)fileSize) + { + die("unexpectedly reached end of file"); + } + fclose(file); // Close the file + + *bytesOut = buffer; + *sizeOut = fileSize; + return true; +} + +void print_img(clip_image_u8* img) +{ + const int nx = img->nx; + const int ny = img->ny; + printf("num pixels: %d\n", img->buf.size()); + printf("raw img: nx:%d | ny:%d\n", nx, ny); + + const int n = nx * ny; + for (int k = 0; k < 3; k++) + { + for (int y = 0; y < 5; y++) + { + for (int x = 0; x < 10; x++) + { + // data[(i * 3 * n) + k * n + y * nx + x] = imgs->data[i].buf[3 * (y * nx + x) + k]; + printf("%d ", img->buf[3 * (y * nx + x) + k]); + } + printf("\n"); + } + printf("\n"); + } +} + +int main(){ + /* + Pytorch Image Processing Pipeline + n_px = hf_processor.image_processor.size['height'] + image_processor = Compose([ + Resize((n_px, n_px), interpolation=InterpolationMode.BICUBIC, antialias=True), + Lambda(lambda x: x.convert('RGB') if x.mode != 'RGB' else x), + ToTensor(), + Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) + ]) + anyres_grids = [[384, 768], [768, 384], [768, 768], [1152, 384], [384, 1152]] + grid_pinpoints = anyres_grids + best_resolution = select_best_resolution(image.size, possible_resolutions) + image_padded = resize_and_pad_image(image, best_resolution) + processor_size = processor.transforms[0].size + patches = divide_to_patches(image_padded, processor_size[0]) + image_original_resize = image.resize((processor_size[0], 
processor_size[0])) + image_patches = [image_original_resize] + patches + image_patches = [processor(image_patch) for image_patch in image_patches] + return torch.stack(image_patches, dim=0) + + this part is already implemented in the clip_image_preprocess function in clip.cpp + */ + + const char* clip_path = "/export/share/yutong/xgenmm/llamacpp_wd/llava-1.6/vit/mmproj-model-f16.gguf"; + // struct ggml_context* meta = NULL; + + // struct gguf_init_params params = { + // /*.no_alloc = */ true, + // /*.ctx = */ &meta, + // }; + + // struct gguf_context* ctx = gguf_init_from_file(clip_path, params); + // if (!ctx) + // { + // throw std::runtime_error( + // format("%s: failed to load CLIP model from %s. Does this file exist?\n", __func__, clip_path)); + // } + struct clip_ctx * ctx = clip_model_load(clip_path, /*verbosity=*/2); + printf("Model loaded\n"); + for (int i=0; i < 3; i++){ + ctx->image_mean[i] = 0.5; + ctx->image_std[i] = 0.5; + } + LOG_TEE("v_image_mean %f %f %f\n", ctx->image_mean[0], ctx->image_mean[1], ctx->image_mean[2]); + LOG_TEE("v_image_std %f %f %f\n", ctx->image_std[0], ctx->image_std[1], ctx->image_std[2]); + // [[384, 768], [768, 384], [768, 768], [1152, 384], [384, 1152]] + ctx->vision_model.hparams.image_grid_pinpoints[0] = 384; + ctx->vision_model.hparams.image_grid_pinpoints[1] = 768; + ctx->vision_model.hparams.image_grid_pinpoints[2] = 768; + ctx->vision_model.hparams.image_grid_pinpoints[3] = 384; + ctx->vision_model.hparams.image_grid_pinpoints[4] = 768; + ctx->vision_model.hparams.image_grid_pinpoints[5] = 768; + ctx->vision_model.hparams.image_grid_pinpoints[6] = 1152; + ctx->vision_model.hparams.image_grid_pinpoints[7] = 384; + ctx->vision_model.hparams.image_grid_pinpoints[8] = 384; + ctx->vision_model.hparams.image_grid_pinpoints[9] = 1152; + for (int i = 0; i < 10; i++) + { + printf("grid[%d]:%d ", i, ctx->vision_model.hparams.image_grid_pinpoints[i]); + } + printf("\n"); + ctx->vision_model.hparams.image_size = 384; + printf("params.image_size:%d\n", ctx->vision_model.hparams.image_size); + /* + part of: + llava_image_embed_make_with_filename + */ + const char* image_path = "/export/home/llama.cpp/examples/xgenmm/imgs/image-1d100e9.jpg"; // Porcelain + // const char* image_path = "/export/home/llama.cpp/examples/xgenmm/imgs/image-1d100e9-1.jpg"; + unsigned char* image_bytes; + long image_bytes_length; + auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length); + if (!loaded) + { + LOG_TEE("%s: failed to load %s\n", __func__, image_path); + return NULL; + } + + /* + part of: + llava_image_embed_make_with_bytes + */ + clip_image_u8* img = clip_image_u8_init(); + if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) + { + clip_image_u8_free(img); + LOG_TEE("%s: can't load image from bytes, is it a valid image?", __func__); + return NULL; + } + + print_img(img); + + clip_image_u8* image_original_resize = clip_image_u8_init(); + bicubic_resize(*img, *image_original_resize, 384, 384); + + print_img(image_original_resize); + + // printf("num pixels: %d\n", image_original_resize->buf.size()); + // printf("raw img: nx:%d | ny:%d\n", image_original_resize->nx, image_original_resize->ny); + + // /* + // part of: + // encode_image_with_clip + // */ + // clip_image_f32_batch img_res_v; + // img_res_v.size = 0; + // img_res_v.data = nullptr; + + // if (!clip_image_preprocess(ctx, img, &img_res_v)) + // { + // LOG_TEE("%s: unable to preprocess image\n", __func__); + // delete[] img_res_v.data; + // return false; + // } + // 
printf("img->nx:%ld | img->ny:%ld\n", img->nx, img->ny); + // // printf("img_res_v.size:%ld\n", img_res_v.size); + // printf("img_res_v->nx:%ld | img_res_v->ny:%ld\n", img_res_v.data->nx, img_res_v.data->ny); + // // std::cout << img_res_v.data->nx << " | " << img_res_v.data->ny << std::endl; + // // std::cout << img_res_v.data->buf.size() << std::endl; + + // const char* mm_patch_merge_type = clip_patch_merge_type(ctx); + // printf("mm_patch_merge_type:%s\n", mm_patch_merge_type); + + + // for (size_t i = 0; i < img_res_v.size; i++) { + // const int nx = img_res_v.data[i].nx; + // const int ny = img_res_v.data[i].ny; + // printf("i:%d | nx:%d | ny:%d\n", i, nx, ny); + + // const int n = nx * ny; + + + // for (int k = 0; k < 1; k++) { + // for (int y = 0; y < 5; y++) { + // for (int x = 0; x < 10; x++) { + // // data[(i * 3 * n) + k * n + y * nx + x] = imgs->data[i].buf[3 * (y * nx + x) + k]; + // printf("%.4f ", img_res_v.data[i].buf[3 * (y * nx + x) + k]); + // } + // printf("\n"); + // } + // printf("\n"); + // } + + // } + + + // /* + // part of: + // clip_image_encode + // */ + // clip_image_f32_batch imgs{}; + // imgs.size = 1; + // imgs.data = &img_res_v.data[0]; + + + // /* + // part of: + // clip_image_batch_encode + // */ + // const clip_image_f32_batch * imgs_f32_const = &imgs; + // int batch_size = imgs_f32_const->size; + // if (ctx->has_llava_projector) { + // GGML_ASSERT(batch_size == 1); // TODO: support multiple images + // } + // if (ctx->has_minicpmv_projector) { + // GGML_ASSERT(batch_size == 1); + // } + + + + + return 0; +} + + +// make test_anyres_img && ./bin/test_anyres_img \ No newline at end of file diff --git a/examples/xgenmm/xgenmm.cpp b/examples/xgenmm/xgenmm.cpp new file mode 100644 index 000000000..4f0e940dd --- /dev/null +++ b/examples/xgenmm/xgenmm.cpp @@ -0,0 +1,597 @@ +/* +08/18/2024 - Yutong - The file is adpated from examples/llava/llava.h in the llama.cpp repository. +*/ + + +#include +#include +#include +#include + +#include "base64.hpp" +#include "clip.h" +#include "common.h" +#include "llama.h" +#include "xgenmm.h" + +// RGB uint8 image +struct clip_image_u8 +{ + int nx; + int ny; + + std::vector buf; +}; + +// RGB float32 image (NHWC) +// Memory layout: RGBRGBRGB... +struct clip_image_f32 +{ + int nx; + int ny; + + std::vector buf; +}; + +struct clip_image_grid_shape +{ + int first; + int second; +}; + +/** + * Selects the best resolution from a list of possible resolutions based on the original size. + * + * @param original_size The original size of the image in the format (width, height). + * @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...]. + * @return The best fit resolution in the format (width, height). 
+ */ +static std::pair select_best_resolution(const std::pair &original_size, + const std::vector> &possible_resolutions) +{ + int original_width = original_size.first; + int original_height = original_size.second; + + std::pair best_fit; + int max_effective_resolution = 0; + int min_wasted_resolution = std::numeric_limits::max(); + + for (const auto &resolution : possible_resolutions) + { + int width = resolution.first; + int height = resolution.second; + float scale = + std::min(static_cast(width) / original_width, static_cast(height) / original_height); + int downscaled_width = static_cast(original_width * scale); + int downscaled_height = static_cast(original_height * scale); + int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height); + int wasted_resolution = (width * height) - effective_resolution; + // LOG_TEE("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, + // downscaled_width, downscaled_height, effective_resolution, wasted_resolution); + if (effective_resolution > max_effective_resolution || + (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) + { + max_effective_resolution = effective_resolution; + min_wasted_resolution = wasted_resolution; + best_fit = resolution; + } + } + + return best_fit; +} + +/** + * @brief Get the anyres image grid shape object + * + * @param image_size + * @param grid_pinpoints + * @param image_patch_size + * @return + */ +static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair &image_size, + const std::vector> &grid_pinpoints, + int image_patch_size) +{ + /** + Conversion from gguf flat array to vector: + std::vector> possible_resolutions; + for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) { + possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]}); + } + */ + auto best_resolution = select_best_resolution(image_size, grid_pinpoints); + return {best_resolution.first / image_patch_size, best_resolution.second / image_patch_size}; +} + +// Take the image segments in a grid configuration and return the embeddings and the number of embeddings into +// preallocated memory (image_embd_out) +static bool clip_llava_handle_patches(clip_ctx *ctx_clip, std::vector &image_embd_v, + struct clip_image_grid_shape grid_shape, float *image_embd_out, + int *n_img_pos_out) +{ + struct + { + struct ggml_context *ctx; + } model; + + const int32_t image_size = clip_image_size(ctx_clip); + const int32_t patch_size = clip_patch_size(ctx_clip); + + int32_t num_patches_per_side = + image_size / patch_size; // 336 / 14 = 24 - used for embedding-patching boxes (24*24 = 576 patches) + + int num_patches_width = grid_shape.first; // grid 1-4 + int num_patches_height = grid_shape.second; // grid 1-4 + + const size_t num_images = num_patches_width * num_patches_height + 1; + + // TODO: size calculation is not calculated - it's only tens of MB + size_t ctx_size = 0; + + { + ctx_size += clip_embd_nbytes(ctx_clip) * num_images * 8; // image_features + ctx_size += 1024 * 1024 * ggml_type_size(GGML_TYPE_F32); + } + + struct ggml_init_params params + { + /*.mem_size =*/ctx_size, + /*.mem_buffer =*/NULL, + /*.no_alloc =*/false, // NOTE: this should be false when using the legacy API + }; + + // Python reference code for full unpad: + /* + base_image_feature = image_feature[0] + image_feature = image_feature[1:] + image_feature = image_feature.permute(4, 0, 
2, 1, 3).contiguous() + image_feature = image_feature.flatten(1, 2).flatten(2, 3) + image_feature = unpad_image(image_feature, image_sizes[image_idx]) + image_feature = torch.cat(( + image_feature, + self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1) + ), dim=-1) + image_feature = image_feature.flatten(1, 2).transpose(0, 1) + image_feature = torch.cat((base_image_feature, image_feature), dim=0) + */ + // We now have two options: unpad or no unpad. Unpad removes tokens for faster llm eval. + // In terms of result quality it appears to make no difference, so we'll start with the easier approach given 5D + // tensors are not supported in ggml yet. Without unpad we have to split the sub-image embeddings into patches of 24 + // features each and permute them. Once all images are processed to prepended the base_image_features without any + // changes. + + // Pytorch reference simplified, modified for ggml compatibility - confirmed identical output in python (for a 2x2 + // grid image (676x676 scaling)) + /* + image_feature = image_feature.view(2, 2, 24, 24, 4096) + image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous() + image_feature = image_feature.view(2, 24, 2, 24, 4096) + image_feature = image_feature.flatten(0, 3) + + // Reshape to 4D tensor by merging the last two dimensions + image_feature = image_feature.view(2, 2, 24, 24*4096) + image_feature = image_feature.permute(0, 2, 1, 3).contiguous() + image_feature = image_feature.view(-1, 4096) + */ + + model.ctx = ggml_init(params); + + struct ggml_tensor *image_features = + ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), clip_n_patches(ctx_clip), + num_images - 1); // example: 4096 x 576 x 4 + // ggml_tensor_printf(image_features,"image_features",__LINE__,false,false); + // fill it with the image embeddings, ignoring the base + for (size_t i = 1; i < num_images; i++) + { + size_t offset = (i - 1) * clip_embd_nbytes(ctx_clip); + memcpy((uint8_t *)(image_features->data) + offset, image_embd_v[i], clip_embd_nbytes(ctx_clip)); + } + + struct ggml_cgraph *gf = ggml_new_graph(model.ctx); + size_t size_ele = ggml_type_size(GGML_TYPE_F32); + + struct ggml_tensor *image_features_patchview = ggml_view_4d( + model.ctx, image_features, num_patches_per_side * clip_n_mmproj_embd(ctx_clip), num_patches_per_side, + num_patches_width, num_patches_height, size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip), + size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side, + size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side * num_patches_width, 0); + // ggml_tensor_printf(image_features_patchview,"image_features_patchview",__LINE__,false,false); + struct ggml_tensor *permuted_cont = + ggml_cont(model.ctx, ggml_permute(model.ctx, image_features_patchview, 0, 2, 1, 3)); + /** + At the end of each row we have to add the row_end embeddings, which are the same as the newline embeddings + image_feature = torch.cat(( + image_feature, + self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device) + ), dim=-1) + * + */ + + // ggml_tensor_printf(permuted_cont,"permuted_cont",__LINE__,false,false); + struct ggml_tensor *flatten = + ggml_view_2d(model.ctx, permuted_cont, clip_n_mmproj_embd(ctx_clip), + num_patches_height * num_patches_width * num_patches_per_side * num_patches_per_side, + size_ele * clip_n_mmproj_embd(ctx_clip), 0); + // ggml_tensor_printf(flatten,"flatten",__LINE__,false,false); + 
ggml_build_forward_expand(gf, flatten); + ggml_graph_compute_with_ctx(model.ctx, gf, 1); + struct ggml_tensor *result = gf->nodes[gf->n_nodes - 1]; + + memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as global context + // append without newline tokens (default behavior in llava_arch when not using unpad ): + memcpy(image_embd_out + clip_n_patches(ctx_clip) * clip_n_mmproj_embd(ctx_clip), (float *)result->data, + clip_embd_nbytes(ctx_clip) * (num_images - 1)); // grid patches + *n_img_pos_out = static_cast(result->ne[1] + clip_n_patches(ctx_clip)); + + // Debug: Test single segments + // Current findings: sending base image, sending a segment embedding all works similar to python + // However, permuted embeddings do not work yet (stride issue?) + // memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as context + // memcpy(image_embd_out, (float*)prepared_cont->data, clip_embd_nbytes(ctx_clip)); // main image as context + // *n_img_pos_out=576; + + ggml_free(model.ctx); + return true; +} + +static clip_image_f32 *only_v2_5_reshape_by_patch(clip_image_f32 *image, int patch_size) +{ + int width = image->nx; + int height = image->ny; + int num_patches = (height / patch_size) * (width / patch_size); + clip_image_f32 *patch = clip_image_f32_init(); + patch->nx = patch_size * num_patches; + patch->ny = patch_size; + patch->buf.resize(3 * patch->nx * patch->ny); + + int patch_index = 0; + + for (int i = 0; i < height; i += patch_size) + { + for (int j = 0; j < width; j += patch_size) + { + for (int pi = 0; pi < patch_size; ++pi) + { + for (int pj = 0; pj < patch_size; ++pj) + { + int input_index = ((i + pi) * width + (j + pj)) * 3; + int output_index = (pi * patch_size * num_patches + patch_index * patch_size + pj) * 3; + patch->buf[output_index] = image->buf[input_index]; + patch->buf[output_index + 1] = image->buf[input_index + 1]; + patch->buf[output_index + 2] = image->buf[input_index + 2]; + } + } + patch_index++; + } + } + return patch; +} + +static bool encode_image_with_clip(clip_ctx *ctx_clip, int n_threads, const clip_image_u8 *img, float *image_embd, + int *n_img_pos) +{ + // std::vector img_res_v; // format VectN x H x W x RGB (N x 336 x 336 x 3), so interleaved RGB - + // different to the python implementation which is N x 3 x 336 x 336 + clip_image_f32_batch img_res_v; + img_res_v.size = 0; + img_res_v.data = nullptr; + if (!clip_image_preprocess(ctx_clip, img, &img_res_v)) + { + LOG_TEE("%s: unable to preprocess image\n", __func__); + delete[] img_res_v.data; + return false; + } + + const int64_t t_img_enc_start_us = ggml_time_us(); + + const char *mm_patch_merge_type = clip_patch_merge_type(ctx_clip); + + if (clip_is_minicpmv(ctx_clip)) + { + std::vector image_embd_v; + image_embd_v.resize(img_res_v.size); + struct clip_image_size *load_image_size = clip_image_size_init(); + for (size_t i = 0; i < img_res_v.size; i++) + { + const int64_t t_img_enc_step_start_us = ggml_time_us(); + image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); + int patch_size = 14; + load_image_size->width = img_res_v.data[i].nx; + load_image_size->height = img_res_v.data[i].ny; + clip_add_load_image_size(ctx_clip, load_image_size); + bool encoded = false; + int has_minicpmv_projector = clip_is_minicpmv(ctx_clip); + if (has_minicpmv_projector == 2) + { + encoded = clip_image_encode( + ctx_clip, n_threads, only_v2_5_reshape_by_patch(&img_res_v.data[i], patch_size), image_embd_v[i]); + } + else if (has_minicpmv_projector == 3) + { + 
encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]); + } + if (!encoded) + { + LOG_TEE("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int)i + 1, + (int)img_res_v.size); + return false; + } + const int64_t t_img_enc_steop_batch_us = ggml_time_us(); + LOG_TEE("%s: step %d of %d encoded in %8.2f ms\n", __func__, (int)i + 1, (int)img_res_v.size, + (t_img_enc_steop_batch_us - t_img_enc_step_start_us) / 1000.0); + } + const int64_t t_img_enc_batch_us = ggml_time_us(); + LOG_TEE("%s: all %d segments encoded in %8.2f ms\n", __func__, (int)img_res_v.size, + (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0); + + int n_img_pos_out = 0; + for (size_t i = 0; i < image_embd_v.size(); i++) + { + std::memcpy(image_embd + n_img_pos_out * clip_n_mmproj_embd(ctx_clip), image_embd_v[i], + clip_embd_nbytes(ctx_clip)); + n_img_pos_out += clip_n_patches(ctx_clip); + } + *n_img_pos = n_img_pos_out; + for (size_t i = 0; i < image_embd_v.size(); i++) + { + free(image_embd_v[i]); + } + image_embd_v.clear(); + load_image_size->width = img->nx; + load_image_size->height = img->ny; + clip_add_load_image_size(ctx_clip, load_image_size); + LOG_TEE("%s: load_image_size %d %d\n", __func__, load_image_size->width, load_image_size->height); + } + else if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) + { + // flat / default llava-1.5 type embedding + *n_img_pos = clip_n_patches(ctx_clip); + bool encoded = + clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd); // image_embd shape is 576 x 4096 + delete[] img_res_v.data; + if (!encoded) + { + LOG_TEE("Unable to encode image\n"); + + return false; + } + } + else + { + // spatial_unpad llava-1.6 type embedding + // TODO: CLIP needs batching support - in HF the llm projection is separate after encoding, which might be a + // solution to quickly get batching working + std::vector image_embd_v; + image_embd_v.resize(img_res_v.size); + for (size_t i = 0; i < img_res_v.size; i++) + { + image_embd_v[i] = + (float *)malloc(clip_embd_nbytes(ctx_clip)); // 576 patches * 4096 embeddings * 4 bytes = 9437184 + const bool encoded = clip_image_encode( + ctx_clip, n_threads, &img_res_v.data[i], + image_embd_v[i]); // image data is in 3x336x336 format and will be converted to 336x336x3 inside + if (!encoded) + { + LOG_TEE("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int)i + 1, + (int)img_res_v.size); + return false; + } + } + const int64_t t_img_enc_batch_us = ggml_time_us(); + LOG_TEE("%s: %d segments encoded in %8.2f ms\n", __func__, (int)img_res_v.size, + (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0); + + const int32_t *image_grid = clip_image_grid(ctx_clip); + + std::vector> grid_pinpoints; + for (int i = 0; i < 32 && image_grid[i] != 0; i += 2) + { + grid_pinpoints.push_back({image_grid[i], image_grid[i + 1]}); + } + + // free all img_res_v - not needed anymore + delete[] img_res_v.data; + img_res_v.size = 0; + img_res_v.data = nullptr; + + const int32_t image_size = clip_image_size(ctx_clip); + + struct clip_image_grid_shape grid_shape = + get_anyres_image_grid_shape({img->nx, img->ny}, grid_pinpoints, image_size); + + int n_img_pos_out; + clip_llava_handle_patches(ctx_clip, image_embd_v, grid_shape, image_embd, &n_img_pos_out); + *n_img_pos = n_img_pos_out; + + for (size_t i = 0; i < image_embd_v.size(); i++) + { + free(image_embd_v[i]); + } + image_embd_v.clear(); + + // debug image/segment/normalization content: + // clip_image_u8 * tmp = clip_image_u8_init(); + // 
clip_image_convert_f32_to_u8(*image_feature, *tmp); + // clip_image_save_to_bmp(*tmp, "image_feature.bmp"); + } + + LOG_TEE("%s: image embedding created: %d tokens\n", __func__, *n_img_pos); + + const int64_t t_img_enc_end_us = ggml_time_us(); + float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0; + + LOG_TEE("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, + t_img_enc_ms / *n_img_pos); + + return true; +} + +bool llava_validate_embed_size(const llama_context *ctx_llama, const clip_ctx *ctx_clip) +{ + // make sure that the correct mmproj was used, i.e., compare apples to apples + int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); + auto n_image_embd = clip_n_mmproj_embd(ctx_clip); + if (n_image_embd != n_llama_embd) + { + LOG_TEE( + "%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you " + "use the correct mmproj file.\n", + __func__, n_image_embd, n_llama_embd); + return false; + } + return true; +} + +bool llava_image_embed_make_with_clip_img(clip_ctx *ctx_clip, int n_threads, const clip_image_u8 *img, + float **image_embd_out, int *n_img_pos_out) +{ + int num_max_patches = 6; + if (clip_is_minicpmv(ctx_clip)) + { + num_max_patches = 10; + } + float *image_embd = + (float *)malloc(clip_embd_nbytes(ctx_clip) * num_max_patches); // TODO: base on gridsize/llava model + if (!image_embd) + { + LOG_TEE("Unable to allocate memory for image embeddings\n"); + return false; + } + + int n_img_pos; + if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_pos)) + { + LOG_TEE("%s: cannot encode image, aborting\n", __func__); + free(image_embd); + return false; + } + *image_embd_out = image_embd; + *n_img_pos_out = n_img_pos; + + return true; +} + +bool llava_eval_image_embed(llama_context *ctx_llama, const struct llava_image_embed *image_embed, int n_batch, + int *n_past) +{ + int n_embd = llama_n_embd(llama_get_model(ctx_llama)); + + for (int i = 0; i < image_embed->n_image_pos; i += n_batch) + { + int n_eval = image_embed->n_image_pos - i; + if (n_eval > n_batch) + { + n_eval = n_batch; + } + llama_batch batch = { + int32_t(n_eval), + nullptr, + (image_embed->embed + i * n_embd), + nullptr, + nullptr, + nullptr, + nullptr, + *n_past, + 1, + 0, + }; + if (llama_decode(ctx_llama, batch)) + { + LOG_TEE("%s : failed to eval\n", __func__); + return false; + } + *n_past += n_eval; + } + return true; +} + +struct llava_image_embed *llava_image_embed_make_with_bytes(struct clip_ctx *ctx_clip, int n_threads, + const unsigned char *image_bytes, int image_bytes_length) +{ + clip_image_u8 *img = clip_image_u8_init(); + if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) + { + clip_image_u8_free(img); + LOG_TEE("%s: can't load image from bytes, is it a valid image?", __func__); + return NULL; + } + + float *image_embed = NULL; + int n_image_pos = 0; + bool image_embed_result = + llava_image_embed_make_with_clip_img(ctx_clip, n_threads, img, &image_embed, &n_image_pos); + if (!image_embed_result) + { + clip_image_u8_free(img); + LOG_TEE("%s: coulnd't embed the image\n", __func__); + return NULL; + } + + clip_image_u8_free(img); + auto result = (llava_image_embed *)malloc(sizeof(llava_image_embed)); + result->embed = image_embed; + result->n_image_pos = n_image_pos; + return result; +} + +static bool load_file_to_bytes(const char *path, unsigned char **bytesOut, long *sizeOut) +{ + auto file = fopen(path, "rb"); + if (file == NULL) + { + LOG_TEE("%s: 
can't read file %s\n", __func__, path); + return false; + } + + fseek(file, 0, SEEK_END); + auto fileSize = ftell(file); + fseek(file, 0, SEEK_SET); + + auto buffer = (unsigned char *)malloc(fileSize); // Allocate memory to hold the file data + if (buffer == NULL) + { + LOG_TEE("%s: failed to alloc %ld bytes for file %s\n", __func__, fileSize, path); + perror("Memory allocation error"); + fclose(file); + return false; + } + errno = 0; + size_t ret = fread(buffer, 1, fileSize, file); // Read the file into the buffer + if (ferror(file)) + { + die_fmt("read error: %s", strerror(errno)); + } + if (ret != (size_t)fileSize) + { + die("unexpectedly reached end of file"); + } + fclose(file); // Close the file + + *bytesOut = buffer; + *sizeOut = fileSize; + return true; +} + +struct llava_image_embed *llava_image_embed_make_with_filename(struct clip_ctx *ctx_clip, int n_threads, + const char *image_path) +{ + unsigned char *image_bytes; + long image_bytes_length; + auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length); + if (!loaded) + { + LOG_TEE("%s: failed to load %s\n", __func__, image_path); + return NULL; + } + + llava_image_embed *embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length); + free(image_bytes); + + return embed; +} + +void llava_image_embed_free(struct llava_image_embed *embed) +{ + free(embed->embed); + free(embed); +} diff --git a/examples/xgenmm/xgenmm.h b/examples/xgenmm/xgenmm.h new file mode 100644 index 000000000..9189db734 --- /dev/null +++ b/examples/xgenmm/xgenmm.h @@ -0,0 +1,53 @@ +/* +08/18/2024 - Yutong - The file is adpated from examples/llava/llava.h in the llama.cpp repository. +*/ + +#ifndef LLAVA_H +#define LLAVA_H + +#include "ggml.h" + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define XGENMM_API __declspec(dllexport) +# else +# define XGENMM_API __declspec(dllimport) +# endif +# else +# define XGENMM_API __attribute__ ((visibility ("default"))) +# endif +#else +# define XGENMM_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +struct clip_ctx; +struct llava_image_embed { + float * embed; + int n_image_pos; +}; + +/** sanity check for clip <-> llava embed size match */ +XGENMM_API bool llava_validate_embed_size(const struct llama_context * ctx_llama, const struct clip_ctx * ctx_clip); + +XGENMM_API bool llava_image_embed_make_with_clip_img(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out); + +/** build an image embed from image file bytes */ +XGENMM_API struct llava_image_embed * llava_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length); +/** build an image embed from a path to an image filename */ +XGENMM_API struct llava_image_embed * llava_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path); +/** free an embedding made with llava_image_embed_make_* */ +XGENMM_API void llava_image_embed_free(struct llava_image_embed * embed); + +/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */ +XGENMM_API bool llava_eval_image_embed(struct llama_context * ctx_llama, const struct llava_image_embed * embed, int n_batch, int * n_past); + +#ifdef __cplusplus +} +#endif + +#endif
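
Usage sketch (illustrative only, not part of this patch): a minimal, hypothetical example of how a
caller is expected to drive the API declared in xgenmm.h above. It assumes an already-initialized
llama_context, and that clip_model_load()/clip_free() from the adapted clip.h behave as in the
upstream llava example; names such as embed_one_image are made up for illustration.

    #include "clip.h"
    #include "llama.h"
    #include "xgenmm.h"

    // Encode one image with the CLIP + projector stack and feed the resulting embeddings
    // into the language model context, advancing *n_past past the image tokens.
    static bool embed_one_image(struct llama_context * ctx_llama, const char * mmproj_path,
                                const char * image_path, int n_threads, int n_batch, int * n_past) {
        struct clip_ctx * ctx_clip = clip_model_load(mmproj_path, /*verbosity=*/1);
        if (!ctx_clip) {
            return false;
        }
        // sanity check: the projector output dim must match the LLM embedding dim
        if (!llava_validate_embed_size(ctx_llama, ctx_clip)) {
            clip_free(ctx_clip);  // assumed to be provided by clip.h, as in the llava example
            return false;
        }
        // preprocess (anyres resize/patching) and encode the image into embeddings
        struct llava_image_embed * embed =
            llava_image_embed_make_with_filename(ctx_clip, n_threads, image_path);
        if (!embed) {
            clip_free(ctx_clip);
            return false;
        }
        // write the image embeddings into the llama context in chunks of n_batch positions
        const bool ok = llava_eval_image_embed(ctx_llama, embed, n_batch, n_past);
        llava_image_embed_free(embed);
        clip_free(ctx_clip);
        return ok;
    }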