From d277c674ae20a3a1277f71a14a2956bf5b3196ca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E4=B8=BA?=
Date: Mon, 4 Nov 2024 09:56:33 +0800
Subject: [PATCH] add omni-vlm examples (C++ & python)

---
 examples/CMakeLists.txt                            |    1 +
 examples/omni-vlm/CMakeLists.txt                   |   50 +
 examples/omni-vlm/README.md                        |  109 +
 examples/omni-vlm/cat.png                          |  Bin 0 -> 216828 bytes
 examples/omni-vlm/clip.cpp                         | 2461 +++++++++++++++++
 examples/omni-vlm/clip.h                           |   94 +
 examples/omni-vlm/convert_image_encoder_to_gguf.py |  208 ++
 examples/omni-vlm/omni-vlm-cli.cpp                 |  293 ++
 examples/omni-vlm/omni-vlm-wrapper-cli.cpp         |   16 +
 examples/omni-vlm/omni-vlm-wrapper.cpp             |  247 ++
 examples/omni-vlm/omni-vlm-wrapper.h               |   33 +
 examples/omni-vlm/omni-vlm.cpp                     |  539 ++++
 examples/omni-vlm/omni-vlm.h                       |   46 +
 examples/omni-vlm/omni_vlm_cpp.py                  |   84 +
 examples/omni-vlm/omni_vlm_demo.py                 |   55 +
 examples/omni-vlm/omni_vlm_surgery.py              |  161 ++
 examples/omni-vlm/requirements.txt                 |    5 +
 17 files changed, 4402 insertions(+)
 create mode 100644 examples/omni-vlm/CMakeLists.txt
 create mode 100644 examples/omni-vlm/README.md
 create mode 100644 examples/omni-vlm/cat.png
 create mode 100644 examples/omni-vlm/clip.cpp
 create mode 100644 examples/omni-vlm/clip.h
 create mode 100644 examples/omni-vlm/convert_image_encoder_to_gguf.py
 create mode 100644 examples/omni-vlm/omni-vlm-cli.cpp
 create mode 100644 examples/omni-vlm/omni-vlm-wrapper-cli.cpp
 create mode 100644 examples/omni-vlm/omni-vlm-wrapper.cpp
 create mode 100644 examples/omni-vlm/omni-vlm-wrapper.h
 create mode 100644 examples/omni-vlm/omni-vlm.cpp
 create mode 100644 examples/omni-vlm/omni-vlm.h
 create mode 100644 examples/omni-vlm/omni_vlm_cpp.py
 create mode 100644 examples/omni-vlm/omni_vlm_demo.py
 create mode 100644 examples/omni-vlm/omni_vlm_surgery.py
 create mode 100644 examples/omni-vlm/requirements.txt

diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index a41508107..922c293ef 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -52,6 +52,7 @@ else()
     # add_subdirectory(simple)
     # add_subdirectory(speculative)
     # add_subdirectory(tokenize)
+    add_subdirectory(omni-vlm)
     add_subdirectory(nexa-omni-audio)
     add_subdirectory(qwen2-audio)
 endif()
diff --git a/examples/omni-vlm/CMakeLists.txt b/examples/omni-vlm/CMakeLists.txt
new file mode 100644
index 000000000..b6d41b050
--- /dev/null
+++ b/examples/omni-vlm/CMakeLists.txt
@@ -0,0 +1,50 @@
+add_library(omni_vlm OBJECT
+            omni-vlm.cpp
+            omni-vlm.h
+            clip.cpp
+            clip.h
+            )
+
+target_link_libraries(omni_vlm PRIVATE ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
+
+target_include_directories(omni_vlm PUBLIC .)
+target_include_directories(omni_vlm PUBLIC ../..)
+target_include_directories(omni_vlm PUBLIC ../../common)
+
+target_compile_features(omni_vlm PRIVATE cxx_std_11)
+
+add_library(omni_vlm_static STATIC $<TARGET_OBJECTS:omni_vlm>)
+if (BUILD_SHARED_LIBS)
+    set_target_properties(omni_vlm PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(omni_vlm PRIVATE LLAMA_SHARED LLAMA_BUILD)
+    add_library(omni_vlm_shared SHARED $<TARGET_OBJECTS:omni_vlm>)
+    target_link_libraries(omni_vlm_shared PRIVATE ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
+    install(TARGETS omni_vlm_shared LIBRARY)
+endif()
+
+if (NOT MSVC)
+    target_compile_options(omni_vlm PRIVATE -Wno-cast-qual) # stb_image.h
+endif()
+
+if(TARGET BUILD_INFO)
+    add_dependencies(omni_vlm BUILD_INFO)
+endif()
+
+set(TARGET omni-vlm-cli)
+add_executable(${TARGET} omni-vlm-cli.cpp)
+set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME omni-vlm-cli)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common omni_vlm ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
+
+#=== for omni-vlm-wrapper
+add_library(omni_vlm_wrapper_shared SHARED omni-vlm-wrapper.cpp $<TARGET_OBJECTS:omni_vlm>)
+target_link_libraries(omni_vlm_wrapper_shared PRIVATE common ggml_llama llama ${CMAKE_THREAD_LIBS_INIT})
+install(TARGETS omni_vlm_wrapper_shared LIBRARY)
+
+# set(TARGET omni-vlm-wrapper-cli)
+# add_executable(${TARGET} omni-vlm-wrapper-cli.cpp)
+# set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME omni-vlm-wrapper-cli)
+# install(TARGETS ${TARGET} RUNTIME)
+# target_link_libraries(${TARGET} PRIVATE omni_vlm_wrapper_shared ${CMAKE_THREAD_LIBS_INIT})
+# target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/omni-vlm/README.md b/examples/omni-vlm/README.md
new file mode 100644
index 000000000..d6cfc7f37
--- /dev/null
+++ b/examples/omni-vlm/README.md
@@ -0,0 +1,109 @@
+# omni-vlm
+
+Currently this implementation supports [omni-vlm](https://huggingface.co/NexaAIDev/nano-vlm-instruct) variants.
+
+Once the API is confirmed, more models will be supported/uploaded.
+
+## Usage
+Build with cmake in the `llama-cpp-experiments` folder:
+```bash
+cmake -S . -B build -DCMAKE_BUILD_TYPE=RelWithDebInfo
+cmake --build build --verbose -j
+```
+After building, run `./omni-vlm-cli` to see the usage. For example:
+
+```bash
+./omni-vlm-cli \
+    -m Nano-Llm-494M-F16.gguf \
+    --mmproj mmproj-omni-vlm-f16.gguf \
+    --image examples/omni-vlm/cat.png
+```
+
+See the next section to convert the gguf files from the original safetensors.
+
+[comment]: # (TODO:
+**note**: A lower temperature like 0.1 is recommended for better quality. Add `--temp 0.1` to the command to do so.
+**note**: For GPU offloading, use the `-ngl` flag as usual.
+)
+
+## Omni-vlm gguf conversion
+1) First clone the omni-vlm model:
+```console
+git clone https://huggingface.co/NexaAIDev/nano-vlm-instruct
+```
+
+2) Install the required Python packages:
+
+```sh
+pip install -r examples/omni-vlm/requirements.txt
+```
+
+3) Run `omni_vlm_surgery.py`:
+```console
+python omni_vlm_surgery.py \
+    --clean-vision-tower \
+    --model <path to nano-vlm-instruct>
+```
+- You will find an `omni_vlm.projector` and an `omni_vlm.clip` file in the `nano-vlm-instruct/` directory.
+
+4) Create a soft link `pytorch_model.bin` to `omni_vlm.clip`:
+```bash
+# in nano-vlm-instruct/ folder
+ln -s omni_vlm.clip pytorch_model.bin
+```
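+
+To sanity-check the surgery output before converting, you can peek at the tensor names stored in the two files. A minimal sketch, assuming the outputs are ordinary PyTorch checkpoints (which the `pytorch_model.bin` soft link above suggests):
+```python
+# Illustrative only: inspect the split produced by omni_vlm_surgery.py.
+# Run inside the nano-vlm-instruct/ folder.
+import torch
+
+for path in ("omni_vlm.projector", "omni_vlm.clip"):
+    state = torch.load(path, map_location="cpu")  # dict of name -> tensor
+    print(f"{path}: {len(state)} tensors")
+    for name, tensor in list(state.items())[:5]:  # show the first few entries
+        print(f"  {name}: {tuple(tensor.shape)}")
+```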
+5) Go back to the `llama.cpp` project folder and create the visual gguf model:
+
+Clone the `nano-vlm-processor` model directory (you may need to obtain authorization to access the NexaAIDev space):
+```console
+git clone https://huggingface.co/NexaAIDev/nano-vlm-processor
+```
+
+```console
+python ./examples/omni-vlm/convert_image_encoder_to_gguf.py \
+    -m <nano-vlm-instruct dir> \
+    --output-dir <output dir> \
+    -p <nano-vlm-processor dir>
+```
+- You will get the pure vision model part of CLIP, named `<output dir>/mmproj-omni-vlm-f16.gguf`.
+
+6) Then convert the LLM portion to gguf format:
+* Run the Python snippet below to extract the LLM portion from the original omni-vlm model.
+```python
+from safetensors import safe_open
+from safetensors.torch import save_file
+
+# Keep only the language-model tensors, stripping the "language_model." prefix.
+tensors = {}
+with safe_open("<nano-vlm-instruct dir>/model.safetensors", framework="pt", device=0) as f:
+    for k in f.keys():
+        if k.startswith('language_model'):
+            k2 = k.replace('language_model.', '')
+            tensors[k2] = f.get_tensor(k)
+
+save_file(tensors, "<nano-vlm-processor dir>/model.safetensors")
+```
+
+```console
+python convert_hf_to_gguf.py <nano-vlm-processor dir>
+```
+Finally we get the LLM GGUF model: `<nano-vlm-processor dir>/Nano-Llm-494M-F16.gguf`
+
+7) And finally we can run the C++ version of the omni-vlm demo:
+```console
+./build/bin/omni-vlm-cli \
+    -m <nano-vlm-processor dir>/Nano-Llm-494M-F16.gguf \
+    --mmproj <output dir>/mmproj-omni-vlm-f16.gguf \
+    --image examples/omni-vlm/cat.png
+```
+The result will be printed on the screen:
+> The image depicts a grey and white cat with its head pressed against the camera, appearing as if it is staring directly into the lens. The cat is surrounded by black and white stripes, adding a unique touch to its appearance. The black background creates a strong contrast and highlights the cat's features, making it a captivating scene.
+
+8) Python interface:
+
+After successfully compiling the `omni_vlm_wrapper_shared` dynamic library, run:
+```console
+python omni_vlm_demo.py \
+    --model <nano-vlm-processor dir>/Nano-Llm-494M-F16.gguf \
+    --mmproj <output dir>/mmproj-omni-vlm-f16.gguf \
+    --prompt="Describe this image for me" \
+    --image-path cat.png
+```
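+
+To embed the wrapper in your own Python code instead of the bundled `omni_vlm_demo.py`, the shared library can also be driven directly with `ctypes`. A rough sketch — the exported symbol names and signatures below are assumptions for illustration; check `omni-vlm-wrapper.h` (or the bundled `omni_vlm_cpp.py` bindings) for the actual API:
+```python
+# Illustrative only: symbol names/signatures are assumptions, see omni-vlm-wrapper.h.
+import ctypes
+
+# Library path depends on your build layout.
+lib = ctypes.CDLL("./build/examples/omni-vlm/libomni_vlm_wrapper_shared.so")
+
+lib.omnivlm_init.argtypes = [ctypes.c_char_p, ctypes.c_char_p]       # llm gguf, mmproj gguf
+lib.omnivlm_inference.argtypes = [ctypes.c_char_p, ctypes.c_char_p]  # prompt, image path
+lib.omnivlm_inference.restype = ctypes.c_char_p
+
+lib.omnivlm_init(b"Nano-Llm-494M-F16.gguf", b"mmproj-omni-vlm-f16.gguf")
+print(lib.omnivlm_inference(b"Describe this image for me", b"cat.png").decode())
+lib.omnivlm_free()
+```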
diff --git a/examples/omni-vlm/cat.png b/examples/omni-vlm/cat.png
new file mode 100644
index 0000000000000000000000000000000000000000..4b446b5dd0d0eced3a3db23105af9d282f6ebdf0
GIT binary patch
literal 216828
[216828 bytes of base85-encoded binary PNG data omitted]
z0HhZiC^3t7j?qg$D)Q2+n;9ftOjCgmQ=Xe~?q(@7<8)r?0V==VoZa=W8o1KEa{f>I zxuu&*b?oZll#o&FC4h4945S@Rg9Ky{Zlg5_BuqSyX!eYTBq2jsF}2*u7leT4CtyDq zVY~T0NN?sp%Uv^d$RB926d0I0pD&JrIRYy4og!$`f)F59zB~pxng#O*aNYhTM-mK& zW2o{v6_~JF@i<=AhKLdmHi=zp^v&xZ|%jZRzcDe;>__oks4Y-(`0m(HcIxVlpYGp}#s3@mEmpC8FAk2Km zOj(+z@}-?A&l&49BHQ`^D=#v`&d-qml`0SA zGufOtsYaF=8-IL1GWDhXalC9NT>c$Gw(Aph&_BlcZaMfG^L&v4ywR;fHWw~k4IrLQ z3-$i+0{$vum%QkGuA!L#10mH75c#$r10HLy@y0X6F-2D4L|6Z{@x{}&Aei<`M9GHO-JN zW&g#@yqE+jVJ8guu{`Ig!XIoSB{IjXS?n?)981W0$yLE@?L8naK&q;=>z`Ym%w zsSb9bwi)5eXf2xUfjnWssNlzdE5h`^0U9o*xBO``$)MYrGn`FNH_lejIGPPWj?e`n zFBc0$Ij&28xa-<_xO3lA&uC)r8f9F*ay8l=-F)`x7n=`0_;A2B6QuW_kIl{7H)rKs zSt8dQyYF@O?fR?lyhm30_-%iu-C>;Wd_wRo$Bctrg8lFRE-A1NLVm~Hiopygk0B6# z?7IhJdhEzTU0-Z@t&r+(^ts$p&BM+7tdlugRMYt8EYUm?Qy%{U4u5Ug-PUWv^u3sv zy$$C$z7fy_z?q4Lnu%pAz`8s$KT?9AA#kXlM+M!@moap|F+9S@tWss2f90&Y1k#qT z&AlDtUnJ{H9x6S@`tDN4?zbmiy}vyx(Ms0${3w@QvX$AK?*yh&)Ngzk5;BCFnKZ{x z0K$1G21F^_`QtsC$(d25@(xT8w>JJ#jy9T+bN)M!nTMn5xy%p%-fR+ijQdSi1l0P(@k z?``gt_O(nr_6R!08M3sVp+WHCTMo=mV#Y}R29E-{XorTJPuC1pT$4a@0`v1bH;R;+ z4@BquT8wouPsV>H0IY4EHJ-*~m@o(Oj?>6hHV}Oh&FdK8=Hj#VNI8--VtF4turw(^ z_2FYSs0%2_?aP6&@E|~#fB(Vz zX+ZKJG5)Gg$Y|BQIvMwM!$1A_=REU|Eagc3juyrrlOOHS-JFJh{lJa12Tj(dhQFR- z|NOr<3haZB-*Jy(h>IC7hB!~Y}S6ZI=gEy~k{_PLmZWFmoFEKWaH-sBdkEf> zX;Lt2`-!0QEy9BU?%Dvn$|Vpi#YNxitw;a@)RzrpQEfY@K4>M&Bi@rr;W#(X>d9B&>i$j z9CNy)UAkX%*#r5b0jJBP`{YO1+eFR^Bft-6rdhC@ z)EE+&?a5=yTpMSl?SBHe%+a~e0J<&?;D~m{8Epi>=Jx?YJVF!TI?uDfs^{(W`|xq# zrZzlFV-dB+WJ1G67&65e1HhR))|*hi(E*rv(JM1%oXXHQBiz+RNu4WCjSlA1QE}BJ zFw^}ET;1!ugvJCK97$`rJ3Kr2Rj-|y(S*YI;tcD-6DLICt?HSY?%a7Wd+>ZvWYoVd zIN4`uK=>bVpq1BXiqguw6icXs4q>V{XMfl~M;dQ5vSRBPcgFJFv>#6!3-xxDGd+0q zh_3a^c-ThW{o}uX3haZB-$Nf`KCIP8auhs!B2{=nk2srt_l?V&w=SRGoX9WaS+f}G zalRddAg4;RdO~0r%a)5Ai&;7_8UVz>*JI28F)gbN?1jRq&@P-D{_H4)*p{l)Nyc&=8?0-AZ5U1-C${^!*Wjj$h>p;(xuI%`dDw! 
zSO6tS%!CdnRFPGiW!CR+zV>;cyg%K1{Mn7otya+7Ypi|IPO!H=-!27i{HQ-u$g1Y15nWC4OGBl0YK)H<%q05#(Yygkzn$3lO_Uq zaLqe4V>uvTH|4#(9E|H{z!l;-6hNApx0k>es=Z`68XIk67O3nH@U&+%A6++2)_4R$ z(w3xM3>nlNR{R(Zz?2F%wZZ9F8=zPz-69atv4u3Ji>M9<7JZn&~am0qw{D1&p(UrsQIg z7YQgcXduP&KEo_yTm%#@<%xLZe0$Yh8Kx+`%sQOEdN_B5mO~!vtM49IcfR^wzogqG z-9EEDUk~?Jo|9~)omA`RJVtY*Uz(XK|71QA($RMDI{-ilPWdpq*yi1;CjE(t8(%aaHm|#p(=tGG zsZ}qW8wqtzlN?6Rr}iIHN&SDRD6Bua|KmdH}aUr1{_uX(h#mOqVLrit;1Z~G0}JJMjOu<6UK_VK1~Z{#R}RQ zZ?2<1pCBq64zEvMW{w-$8r4ROvKnVVNcTq@Qh!(U%6lc53-eQXeLM#sQQph|F+#fR zx%Cb%Kb6HJYUwt=d9;Lw+LG5jK!$&aI*49uOg;^yK73F=r6uC@x>uT;3GxT|i%blY z+P~fh0KM`Zm919a^kHXWLHB4r#Fh~m77*kIbvv`*I8`^^rk@&vtHQfVf77+yFYnt# zfqfA2wY>pUEMT#2d#(RrWRA&UfZ{?hy}Xv@}fXNHD3CRlOBj>Rz7@43()48!5-FjXhR>l7z4z@C5_A zpF^ZsDd47$AGXTF>Ke0p0lokNLLQJ1ihVh7TGqQXG5i~-md7j}^LS_$I32pv6f+91pmljUGMJJ2D!e#wx#z zhiF|pXT?wDsteV<6-;R3_W+6L%e=ug8gyeBjuoOjj5zo*ef zdOiv>K6`Eki<&itfE%=YS>7{a?_|bsxq>W|9?k)xX;^>P7>#7kMi~Tv=vbAwGra2#>di#P9w{5qQ|ZV z&n5YO(+6I1`P#L4uSU~YA3bi2jiYJrl4Hz9UP`~`@BW{C3Z(hlC&I5$0H44>7`=b} zc=giuEsEugx_9rHU0P$}ai%veyuSIj-+LqQku8)-rX`xE5<2Yv)UF3&Jb=aHW}{x- z(=mJkVX5b1j)55r)eB4r@F~Fb*7YS2F*brRjz5eWC^2i624mxVsJaBisGFVj`&F>< z>hE1sUHiIYUHu1GN4GHIVb;}E%3V$~s~qOzzT8(n>vv}}joaPZmae)nam$2fa#`Z# zG`Z5NCV!i~m!v=85$H(ROuv;@I|xcDZ8QEe`9l!KcCSSTCLQ#*!z+QOKR*5J^Kl54 zo~8l-5%YnN##os;tacf5LG%sg4M>{dQnlql0E?uVD9cQe@T{JI%fUW`C=G{wL4c8m zw;xX4fFz)#P4ch@3n64|$eU@xQehetpv$hSKqdicbpa<&X|3kDkT!(x4E6lu%rIwb zKaB|i@Orao`I&!OCQC>H0ydXJuUW-1G}lFGcLHd}63v?NMQ9t7($4wNZOz{G8UTFw zk>$40Ex|)bqrW&c$9o5k)*s`X*cjbuc!YB2C&DrKfjV{276ItW@g5M)>M_8t@}2MY z?*X2exuiADwP%44zBc7|Y>gANn)=jl-q|ttlxL64I5mFehYA3{zA9(>pl9ZgxVHK9 zYdtro{V9oCuJ1AhcwAYj{Q^dOSC{f~7f$KP&exu#Y1cXLf>E9%DY}8`mXJuyOf} z-JOsb0qQ&xRX!55K4?O~bCy&hH8DqB*PiG5LEzCmJk8M*ASshC+ch<;EG7kI6Kc;x z6$Iw_lcy)9hf42e^rJnA35$w)?o8!Gdp;kuA%rEm02u&*cAY5=p?#-kXhndOo!Q|I zwR?hyhQ=8496@E(2SEim(0BngOmJ535Zs(UEl(DGQ=q%E%ya5r8_%AJUI|o!@N9IX zkwAw_7u&MFcFGG)g=gnWarkiYWdMsn17}XOK2hK-;8#F3D>}^FeLR~QCIA+_dgjS# z+63X>^G%Azr(69c;FflKo;iV;#%pB-(zW(QI@@z|R1SZar_WZO8@B)mIvVHl7+TaO zniF(Dw+2uC9Du{B>vJZrz}%_!ZaGpcp78^ThT|w&T@I8N9}o4nxgDogWo?ncXxv^# zFXO6AXWY$fzDm*txeg>5uhS5Naz@!|#@GEtUv%^=4dD2Sl=s2X_+GZh{QL|>}E1j17n z+O`iq_+T_zyMhFSTeMJ{SC3ymfA!w}?>|Ng{1JnYS6dH)2R5&@k=WH%zMN0MV6uD+?kC&`8ERLn6i&oYgb=sR z|8}6{XsUa&MJ9q-nGxd|HF`4&!okefstY!5s(31RF(WW5&k$6**(wI@<5;uRlCp^@!X7^dnl*^>G1fhFOPg0>HwY7H2W@ zy>5@sMa_(+_OJm69OFzGWjM7S6^*w4=*CeLh&UdIBiIT1p*t-@^a2JB7DAl!q<#^z z0?19GckNi^uCaHjoo5BuBG_q~)a6L^)(=ck+UvNQFnbcMnR1x6fF!eNAUvbujB&JG z;9>zp=QMo)yfLnxW44Rl&#ROAPFt&&G>-#wdnW>Z_0^Nq2<{*1ogv_9jeOH!@&g(W z(z!vi#w?9Y%>zVorn2OI0B{%QT5SMCJOE%w+~2Q!j`Hud!(Gb%(CPXvzfnk+SB+QF#!+xo_A6HHv*<45ya<#o*u3fp5 zFroRd0^;tZC0v+bFmS6`9E778`O#)-R!mq;fGJ_vV;AlvzucDJ1d0QrH0#66sox1I z`+H!Nv?asbVrI@UdNWnb4YR^hVWzJBJ~A@an zu!-i(9#$36Anl^4(n2YQ6DVH+faUJpZ0&uYP0PV2TtC(g8@zos=wKrOh~DsU$%kEe}OU0vbJO z(r7i5Gs`K{YRKbw+K(AajUQ$dw0zKpb$$yePrG-#RUANzzBpL4*ki@ZF!pm7*^cF9 zUL?SL4hLYAiDu6A70t}CfZG7Z`r~kQ1lFdLsw}!2`^Ld&3acM;lymRS?8I9-=Fr_Z z(udx|;|Gfo)9+`|_1KB!fUNJ$)V-6;asxz`qsNRzbVVbF{C3A&ug~1t0gx576$sv? 
z^IeL*v}>EqJhjVr_dWSaUOQGwTA9pX^}~msd=wbDyLsb{i<>7goa?vl)+aQI-Y50y z8~74oRvxb$ub$FJeaYu~qgK7uPhN|cjU#L;mc}F$O=xH2scn36E?>U1xjLG*wesox z_utRiw2c9y;WYG>viE=PQ{bOUfj?pp@-?K|ag5!q?YY_G*z+^1aJWAnQ+TTYQEy$n zFu>z%F#v=CI7yJC)i8U$I|7dIIbM*fw)gIuRW4?|W~mtei8%FZSC38gcy{-vq_XUb>Fy?T%M}yQVDHhS_4ixR06H zZyc|(HaA~+?l;n|d~q__Nh|-Q%I@1Gb@D@kx!|H>oy;w#>Qo!%XW!b1sbZL`Ri(71 zGP>?foP@xEdogSP1DFBAsND}WdnZDG6EPX&1;7D@<1m_Iub2;a?%$s4qJdfgLdX&H z0D-YEPNnq=pAmu(fIx5HfNu}s3Xq6&s{Yze=#Cblb9tbGnKzJ0;M(oh5=NR8S^&bI zfPOfe#I*-bX^aV4FPag6iHYt;%Qmevk&iTAh#?Uz?lv|69AnNn;TNLKG;$nIl{v9w z%9CU0qnGU>T-k(oeRE51e}Lw{F> zJYOWBfwGs`Dy#;Q@OXLCYCdfC&NW$E)MNaXdgf4p=ejW0LcQH;ys2$_g!*zJi$8=d?t{v)G^wp-_$Gd^YoN3%& zDN?iF#(_-|Km72cF-gAc-O^9ACi_ciw?y9m`e##MAB6lm>0Wtx*DWu4e4~G64<{06 z@3hP9|M>ma(t;!uLM3LJGox-|@Z|PVrxH4Zlz>9^7a^qVD}h;1JIj+Amk^w^F+eZ2 z-^-_CsjD##0+2wlZ;WM|7ur)sxbF!@wgn)V>C80y-o?@Ij6DCLV|RB4PHN>Ai0tlo zddpz{fs8yi%yr9bSJEBh^}T3lIVle59~OwGJ~ zqA41AewQC+YsR=!`*R467?Z~C#Jl$Dk1??%+#jTsdhqCe_IN?c3YVXzivX|FHV2Nj zOf(P*tdJfjn(+ey3mh!p23m&MHw9BK4UNDNzjMlD-Wc!^7-hyWUerfqT;qTcIFi6t zpD`)*JaEL6ryrSSr0Mds>6G1E13T!jgznlHZD?86(hg9~6eNZW0CY05q1YvMzvZif z2D9oUP}2N2$I+wXOEPr`P^j070_`y$=?|cQRnq7AUgKLN(XLy4VoLzpAmtv;x8ZsF zq%a>Ee?ZZ!)Jnj)&)j2(FpFw@hWxu*MmO4cBm86nP<_8rei=yla>i)SGakxXgK{o%!GBn zfN~#v_`&8@T94-*Djqc>R-zo6Cz$mOrE+F0C*mwu+jxn;dD1brHlv@GfaL7;x!yO zmruY}t&XW;3;@XN|I#V=GdKgqP2@@#so3N?X^kg-mjhSuJ69*nQeX&FBZEkXP0Pl`SRqq{MbH&?xiqW ztYWLqeXFNKT}Kt4Fap}d`T#CBIH+}v|5EGDzLZWhOBf)0u(dLE-n8Y8i>0w z-a7^NIvQ}C*?vM#`9Pz-c^n{6LxHzM0A+RoLPn5b>Z+T7IFac^c~1i~2b!q^D^JUV za8$1m0R8s8OgxqnXO4*Wo^y<|Yjx5t8yFf3YlRF=$jQ-y02k^>ivsW%J5_faQ?-2n zM)!dZ#54PsMi8=f{&;*#Xx7YlrS}~05k-f_!rnh-52$YH5`}{#SwDc}8 zebHnb?)?(OL}08bt82_~%0k;7n(*_UwSUda{3@P4s9ohrgE&N~Jn=Y~tENqrVYgnt z-5*fzXV;y}7bwy8Nz&D?J9=_^89a)f^SC!<9dOMYN%$wV$I@|hdsbi^jr5I zZ|)a#&Ym@gr`+As(Kj>K0NZUgUw)P3L~&d20=_Z8lp0vhe<{FmCZ}CmGJusMG(V5~ zAKsr;SNcaE=;Xy0j9tJnvEc=M1$2E`-u{n$3j7l(@W%y0b}u?2?cQI%U%l>pdmM?O z9m@~o^pWG6w_5e`KeX)jJJ-%{&YUb>LJWNe2E3s1!(7b@AI3}xLxR!=U>H?Bb$HB= zikLtlobKf#;CZTh3=Sv(TnMF6dH4MD?$4|{=DO3G05gaCoH&_@D5q9Jgc-){7{ei8 zF^?~gspD6T-u+t4^~+TA!&C0B$5-2aHO1GReVJnq$GC9h?WKQ+IsZmZ+h-O{0~-kr z!Wt7NSkP6-mgjqb$M#+o9g|D*`QYc2Y8Ee{R0%*e6jJRJX!=oOMjr<-gETz6Tn`e ze9VMtLY_Q*G0&laek5#_X{_tp0VmbR&z=2gIN0yv%$4 zm0T3uQ8v?m>ObRbTgtk*?rE?4Gp?tU^|12PYFDqSFyr#0b}qi2U(&q?k8?=AzIp$X z>(wXUnBMg*0;sI&=K*;%oF>w3rvNr!r=QSLNwuJjFfWd8hl$MTzz6k=@>P$$lV0j^=1$9I@n49g6lSQH-^@W zOnpxw{UHtEI4fxC zFM^DThL%iUpE$mviwagZd(lLjd(ZBV&ye;l;91)8oIb96E2H>!s0*;U6D|WZBicE^ z$@&l?cLK1V6z}A%HmXbuJ#QZv_c@pD?yK*Bf;&^z!We(9?|6R;Y>??6N>h~VP<1GBjlA2@P|kSL~4(A`+f;s zcFfGaa%ltfF+UiKgY05GekqM9fa~Sa`mu7z#tu-P*kO-g|`$Xgh-Fr+Ad1e(S^gH~y$7@W%l{@P}XdP|v0gpBKxYUcSB#_lZR|6W+wY zFK*tse0KBh#gj!pZPm;9gna@?>;#NPi0RKh|6-QeVwRZV(`|64iPMldhW5fMXesES z_KTrL$CwDMC(Vipm!tDUMp52x?nXJ^(P9E~wX7FX& zsGX=bR0>LaET0@rMl;Is zAAkJI5%#Y&wr=PAc&m@l@an1?1VN@GC_L*v5ECWmW7+ry3?*ktV?Eso) zEcRO-V+!eAvDw?|hqQ*3qke?%GJSE1wfe^lK6(rY=(h{?m#DN4o;?^xUXHX+TUoZN z!_%j06`A@FpaYs{C@OP;*>P6woCC6J%2fss$~VQPeN{5;hmV@2y?Png^mB^$V~z7E zqx!7(uIv6cUX259(0Ys3we=3HG-dGBA*;FEA4mDA7r^=#|MjmoKl$0u+by@H=1oR4 zf8`%o9gRqwtFiYyv(mFincS+^#fw)qfBM58Zm!zGp%q7mvcEhnzQ}v;f3W#+n;XW7 z;y>$s`LfgnnS?ufAu57OMvHIn^@4h_@ z8>6L`_bhd~55a?(Q*mS5zzv`SjNlSpR>H~;`Z07*naQ~^88 z!aQfzz4Gh2|65v{<<9-uuKk!hK{x8{(!D4!oCoBuih_hOaq!nbTQfX;M2iFf#N!7D zV)RR6MFYV{K?K$Of1>q787-akX9EDvwRhrZls_lbhi<&;c6httN5NvAT5N3(t_U6_A__y>fU{m!00JLAi~Gz34u#Z*`o2jCt# zdTM-;z=2OcyFTL)DA5lIF`IrHA!j~!NXmNCCC-e;f;+TG|ZN{_rLH{TZ z&;wNb)<^)u{y1> zuxOxaU7BO+Nmv_@9j=SJqR(&KXpf-!S5C*P1AivG2k?DXMAawTI3efGU)cQMyMNN> 
z-Oc&HXbO<=?fCSw&o=+{um5`U;YS|^P?kAMyARp9IhD$@>Z~qD>#rSvQ@#~5`fvbW zWsehe^xuKUy|j+%JsKsnY=X27yk<2_ToH$0c_sqoGUrmRUvs+uXP*K?fjZL5wqK~C)cu1nht^lc)8gOmBxc+gP?f( ztv5#_g1MQw5*V1YS+6CG%RXNfstd*`C=}+f?B|Q&9Xz;}J-Uw}e#!l&>(`$hqjWaZ z*>XsE%`=lselZ>#Wd8Va4v_hdy$n-Zza^Qx=Gm2YNB?r_ruw+qbK87|5yy<>wF4rU z`*5wKfO7oCFbR|qcH3tOYl4beS$+<5@w{@zTooOk97#~-G@e|Cjp`F^ zL8qlLvD~>0+*&PgBoJDyW=S<+s%}gz{3mS0Mi6sk1saywBQ<|QnK03xd>BMD1tir+ zzBDJcspt1|C`U~9l_CAHXRS~P_yCy36TgTVCkawZc#RJZ&VbSYu|pSUIk<79?|)I` zO3$H_m?zR`6A;2&!!!u6+)vxVymKXRqupl%9#%%&Zme06GkXyQHqb?oG-S0yGtG=~ zK~sY6%2nR6*{U9W5?G}j^?v0rx9rB>4nWrWsVgmn{B`W=t2)bA-i^Ud0SD$zdPgG!3<|I^`!A~`_|3PU;fQcHhf7QJh(60Xql<0 z9lQ@8Tbk1?S*KL_)1nyDNHr^NBxo{LFV6FG3I)~P1M$YAtq40Bh%xcf{{vp;dGF6; zJ7e@_rlSWme3fyirBv%(1T+^!TPgSd?o(h<;ExA{Xm2mXFpv?N2`p<310<$K)UnTd zY59M=`}*b0cV53T>P&*|^Blx51~zgp7EJ_|u;b97=U+M?jy*c2Gpf>FO8b9sQYADA zrdZhc6ZFPUSmtNV9$lB-?34QcY>fX@>Tcx#9+8bHz(ngTIvMne~9IGfhSdoo}skYyHp zy!*^B05Bjkc{lC=3w6|g@}@yp_xNvQ#;83sha4#Rbxc3^J=&@2@YVAp>!Lif;sS?d z>~slwfTi}lC@<~U%cS6)9len2}wx8KpC&58Q&$O(H3 z(OA^h`oqehV}-3ZK7bZJN;I*6*ITb$*<4H;W>;imnMR5BZqaW6Jbm_juYHOhH|gTr zRNM9O&XqfW-iX+P{8g2By<763_D zU{cR2Z)UHRgE`SFd6?=5hJJI_6kW8R@y``xY8Yn}@Qs?F8 zWv+n!FV^qQdJa2}-Szx7y;ha(t9<3TJ}bkrsX=3@k#NePX7AcOyO=zIHsGUg?HHgX zFjnm$;6_^#ecaoDgb~8c;EoWgm3i<`!l^zaWC_=$eMsnZE*!e@>^4hS@3oz}u>dsM zw}p^rVquDSn6_g2y|UeRP2FhAXd8BI$K(k$VC2rdyED^e1~9ACpTMovQtG5^>Ev}O z0dzh=p|5VFF+;#($LtuJ^K#cJAag^??s;|EjSsPD&Ye3KxLW(v075i-?(0_VdsXA19|hx6?$ob7R5#`?G}OO< zpZ0XRfTgnC>)(@B!DzR4#G&7eXMQjGh6cm4oP~`Ld8)ZX-ujbYj(U#HqA|i;DQKCz z_<#|P@-V!FV&9_`>*Knd__3Ol907O}rW4C!Ku7@bh|v##tn%d1|BkLsHMVupo=LR6 z=l4oAH1o`&U-^^{=mrSU`hWdj{+H2!8YdqHO8)-ue>zUb*Kd50pue5Qrl%|BSbG?q zG!6n}`p`z1Yggs9VIs{N2jz(`)I?`AGL_|>z`JN!fz+E1pKWg3zd3$J0IC&N#)UNQ z<85~ytawO(&GcRMUv1)$%;fV`yE~aS)&Zqi?R0Jb*r&j+Q{ayagyfSk*5??@(Nvqq z0|b|v0laS9b9)16r;W*GCE^X_8~gaIatL0G9Vv+RTuMn`r3qUFFC zIL7Ryr;1l5KM^yUaeS;U#Iw8!5JA%H)xpyrbCB0gAiQcnHy}nx;uQbQ12Y}=yJncu z_t-Gd#fAIbwSG(GsCx4Yvt7UX+lhZ`hP<7bPVqaYU7odrFWlNVji6cZ#O&bAU}uc zn+0?L1?p&hKwtn3>O(-P48iYP-{@oa&_^Dw@A_Z|B554dZF(=yX#GihCcoS>r>v&& z+4esfUm=2qf68dWqRFE`hl~kqrajl0M~p*b=|RrY@-hq7R~&C?5P*hTxAI#`;Lzx} z#~(%800Q?^=nG1_Eg4@(9SjrXTei zVXtgsb&mQ9jR5!=k9`YxHvL!{-*df8jB#LdhD?H6zD@uTCaIJhi$1;IR@T(X^X{Rw z&%yGNmZrugcayI=)&6Nq8NSH50tgW^geE~5FMH?rqC-TS=NDY*diPf))T4^Xf7Nc~ zH}p|OHQv_OGuvYxZ_**fjFWx=2mkT^_&>%E=ku0dTitWC0Ci{2UD$l@d*2()l`%Mg zqkh2yuAV=$xppy4D&UFNMMD7jy^lZJeE!RiHlKX(eDffU4|C+*mj2%B+tvmANQ^se zcc6zh@%I=|ALsybMnkft^U$gBH_vEa&kqf%#~d^y@*7puy0rgyp8{V;fo~p!7^B;9 zI)nDBck%1$_s_aUT)kQh#k=E3D_hJA%#M!qf}D@}e)sC7&5v45aQSlkd>mN<@y=`# zwq}qt9Gv*r^rX-Z7%Nxcei?wx1U7#7MiaxaWNNBguSLqV@Flw_>)* zhp}O9%ADKXJ1~0jU4fl%f9~o<@Tl+7bS$Rmw-PxN0xdiUX>0~^^}W~X2Kiu~+K;h5 zd!CO(;Oxct=M;rDA*+7~;L{0Cz=ie#eYAxM001$$e zysbSj0vs)8%F#q^1uCF$yfTIK*Y}P)-hpb-HKv_qQ*uqH%-WZMLY zDe^|ET*v{>83&JwXL6$asobvwc&!%kJS_{24$uX3paUmpf^GSDe6=Tx-arp+M=Rjb zH3!-bDCoCu>2_(Q-vJ9kUn9{HFd1;t1^wcnsbC%D_&We|`f)uYEx@P5rOBxD@&il( zPvz>5y)+{ z+rK+AfXFQ-te5ok%zf_f-rM2y>C@+&K7G2I56H?W8&`vmE9p~~B9D+dCa>$I9ZFD2 zPhK~Y*5+Z#yAc?)ZS%!d@^kziIozp;XwOzaoP-fI!rt#ZAYb%k`Lw#)NbHG^KHe2e zc6c=vV_J4gwguRb_R>a|W351!>g=4V!*ND^_O&D7AMtC8HOr~bz8yV&EPf?JnFg-* z$6u<*rtV{)Qm_KcLIFuczg=MO3MKF88o~K3-GU;b?iwfYR8zRAz5Q0v@yCz5;VuUL z6$aj0C8QL&pvt*u)aw!!H$TOCS4^)PM05Su;*6N%@105r5bW)s6B15q!UQ)&Fl>$> z*_#pN{aL=aJ6l_Cj#6;;%9-iJ**LPDGcP)Jv^0t|!n|~4oG)T3=ThdEFJ(kQ02x7; zj@gi{x$Pgp6n=x%gcTxXZ;c7unKBXgRc0^%Q!8f@2Xj3UPk1A9RuemM8a=BkVIY5T zC@UDJkvwGxY?!;{lN=+BCRtvqxd z`CU$uJ2?8B+Ysy?^S0k|$2k|jXNJ}9w~{r7mLsd?LOEz2*zYKcFu{&T)<~1w`mCHx8eju@yLHP6{IY 
zQY=|11Zo9p*cGxRqYs~-PkT;xChHaFlJap$7%r?bSr!Fy0y{}z0{X{tkd_g!|R~Zkz z&bWso0S>fV6)rdrET$cjbogt8emjMhHeq?We3|8mg(%G*Cttg%!Zqb%-h&6MwWcL4 zYehqFawQ6;x)Zccr5#J}qd=ux4bk&s#0zfKYt7nNj`AK2+(;iP9F&hdU$enm@VX*O zOl)geiIvT~(oVLRzY#p6O6?}!ceAy*?gKCtX|&R4VGj(Y>AcrAf&t8d{rjU3#y9PK z_jRy=eqYWK+ZVH3_vvT8)jdOI%V@1%spqBO$cnlP!F^5`=#Huigum-c;U)2VTTu-H zo>FS?NVy3JZF4+m4x?W!`X;y}(Toga z-diOkoi<$K&hqjb^tM+@m~e~rG1&tX}&IJ)U!IPAqZnk zVomOA!q~QC#7Ie4vpVMNYhm!-^RAm=h_l=4(o4q)U-Qs$s@g98&}&iBk3aU~lG zVOZi`i-iLtzYr5A<$y`uh~``b2?0Rxizg2PfD%LDFh7sccS&aQG|hquP<|*K^5a{w zxlW3iyQQ>5Xi+pM6?{po{=o2twZ@1QYRsEw1ReoZOF(MCauD8=I^|E8NJs4{r@|w6 zDI;uJSXlg!Rnm^pFEql$ogw+=)p$M9j5s6S99^03PTSZ4)_rU?Hs__a!$9*5OmN3p zkznhUXU>M5s!5yIft7%1sxVzeH{ax(yEJ9i5i0VNueN~W*;x_{;A1_~rs8xuMcHW= zZ7YaF0D#XlkoN?tGPGmcAZ@H17L2x4-V?rH5iVT{?jw-Z+@@zY@(qp3W5$ug_BzT= zIpkg*Dtsvi7eiOJ>4y`RtPP>@?b&E;dlU|qKIZl}vI{0fc4KJoWTpuz$KWBBSWR}u zDzZI$()j(#Yp=Hsrg0viPaPAIwRrEjjzGRv9y*&K4>;b|8 za5GC_I_;7;#cT4?4{l}vadXLW2*Wa#61tSMR;twNM(`BIk)^CjA1BpoSq{4#f?BZ- z=8X75Pr0rW;OVhw);wT%~XVl*8GCIcfb?8qgTH{ru|qz#I>5} zdbbW(;Ao*(h}S-qG=()Us$hXrJ{qE{kVIV6F0XDsjCZ27To)h|ol+=p^8-*sizI@XCWRdH`bcx2!aCU0lJFkjOUi+Jz2 z>nzE0WqR|Pbw;T%r^ke8pO4L9)J6#Rgjw&;J_#$*tIkL9IGJ4oFU0ZcEX5#9nqYeC z$niR2S)IUKt)Cf1os0rtB#40-_1Y8)(=md?OtC57w?#3~$(fpYit}h^J;69AFZOL< zLT665xf5oOA&bXRP^FJ&zFr>odTUCcv=%WWNo3S zQaro^lXj>-N1nwc@cEkvggQYj4aEm7Xgg=KbcIe7jqRCA*_^2w4uzV~ikYpswsjwn zHQgu0w)9Uw_d=(1&^B5#tq0#|)1EfUu3v@nlE3LB);4(27r?<%0VlH(ke_{~WKizJ zfq(g;r&``}uaz%%)}Xbq+z^haEXf;Je<2uIU|5!v(G?bXCth(FTofPLGX1X3r}^Eb zw@Fui0EfvHk6D?vH&v&7ZEThf*i|u9oqTD(xEy%wX|W=0a5HTeH9ov4bQ=MkM(G8{ z;qQesQ{X5lbzRUFBrJlw;Q;*F*WCpv#Ycqw5bmDYv_{%n_dIH2tE~8Sy>a+(e1hJX z_Uzd`J@U{)Z7YKz^ku2BKB3LJNnY-k;!JJzjO%o!cTOBTT4fWzgL@9A(%W;y;0N9D z(~%FJGq>CPtSv`_=0_jEn@gQ?)oC|vRE0ww7nf&BQIpTexqH8hfp=lxJ+_3rv$Rzi zHSmo|C_2!kFZLxJ@30pSu zY6tg_w|K*v2=qvNWq#EgD5c@BT!gXC&bM7ta=!eVo*7>BdD z6GZ{DsHGzvd{gqD1udp(F903eI}AMNNZdFSMmS&;tzXht8A=>s_!(q#wJ2l+!Qt$l zY2^dKXgh8=1hbJa-vQ^NoC-6t%x^;kl~o({KcS@sVHiCaG=MpF%#h{6{Jr~(U-tn4 zD8M}M5B36@+h9OpJdM~tA~!Vs_|WI3OFlExs{)kzAPT?h|A8^)8Uio*<@-nn+yNV? zT2h`V3-B;}I5LtAa~;cgM;;2URVH`V2}@IFp~>tGJne!OWIC5Y@KGpMMA%bKd?PSz zGftSZOu+e@X>&d$^E4UZ&OuWXokOq_we9C_izcR^QKmCp?%c# zX8Ix+##9U6gzyJeh@@$Y2FLC!B=YF0r)63 zo;t0jE-UPl0uFfSec&iBRHAU@mOKaj&i!QZx%ipWQBJi7^~`h|+W*gg^LNvK_{;wo z{;_3x?2$*RC_k4KGv~99Nb}9|+3r6dbSHZH3B;vDrgZe%1f98NyC;Ty z)@LCd4TEO2SQ&Tr%#2Ly!t^+DQX}YRX>O@*tPM>Uzcl3 zEGE0QZSH+*e&4@;cTTw7I&IBa3fnhF;ASbJvmr(Y#1z{LJ5n{>F)SN|?GW@7rCmEL ziQE$AJLbeOM}mQHk){esI?cj}ml#S_7W-$Gl zWW#WjhskwRs!j_Aiak{k2y3aOBi{xi7>uiID(?pCStt_XVB%$Rx0&s&Jo8<7iLi(> z(pAA|y4&ybt4fIcFz%M)cj^rnW#Xx0k&tI~lQ+esO3Q+I7v|Jy2t?Ci5u~9lJ~*rv zvocmiF;ir+WVQ!$g-(7G#75E-8j2ldg)c-cQh|)qAbfU1xummnceZ|1kxM^l+eaCI ze*wJera%gga=|YiOMrH_S(+o=-B~62K5@0LQ+&~)slN63);3D-;ok)U9p&_;*o-tO@Z)@0zkOxCpKd< zHRapQFrF(__lBVzV!KvRkH2u8X}LMM-=Jg(W`BeJOdBBx^8VxAWq7hKY#~E z(~gw%`J!~$#|94+cR&crcZSw2uWpsS`2EgUzueP|KYc+MIQ!-DxEuSwe+>M$Q$k|- z*c`=xQa~Xf>^J21_E-jXXNqJ~6pQ??jvE=*C#9y7E%C>B|zAcss&#~23Ns-pvF#C&980J5wPU2u1=7kAVK&9i^V%i)! 
zu}*my{uoAU=22scPlqQ6Or!X&qnb`Rc492eT^A-wvCtEl#aSx&o=`$ehhU}zLj_Rc zh-(*03IHX7fC8J58-EYwVhWU=Kq=es6ZZvtQD)5btA}rxWMULHRuqhl)x-RM&z;%1 ztEZESJPxk9Z4NIJ32uBF3}}=mk17E^_4{C4<9A*r%*>&mDGa&h?_!Mi7;j_uh3`XI zabH|z&Pm(aEClu9x0I9%Rt=094(Cv2>iYuLaUb8cExdH}oI3@B^2-Wg&bl~{U?i9c zclgz&tRw`9-_jDg%_I3y6j(OUf;J_jF?viBjFeJ!sO2PiSXIN^ou;WGD-Gq762qx< zR!oAJrZL6R(R=chr)emwS^O4<1G5!6u8>x(K0}cKgH<#fKI7XIr%Ee1CbsP|Dnltii!CQm)Vjp4CF$VB9o_{>3B1FBciNXS z0uCd4(^!{Mcfz*m10$i!*%NLrvKlSYxwD*5LdJ6U(atw13~6H44DsQRLP7xva8)qU z6RT8MTANPRX%+XS(9cbDls1qkdZwg3|-D#MmOrNgEbL=zidF2tQ*HYTf_U)-) zwmuI((KAC!>Da05z+)AV@CJ2rp+q>CaWPv%e3->LT%o;&q`?R5kb5+iS5tG!-?wK^ zR`F%~{wSd62j1_!_ujPW*z;r1IeMiK1-Dfu!au+qMIw#RHmLPx(dXnl!`bpjw;6Ew zi{utTx;q}{*S)t7zvIoX|1kGAIaMKB(1iCcLAiIsZ#M=)t?x}h*!(VZvy3@9lDl^C z{PgIfADK>PgnlC146j?hk&VK#??Dui>1K9gTv!@RMTBclS9PLv{4JSUVKF&zW*kM1 zfyjSNMkN&%DPw(<6O4)gbdso`BX{Js#vFT``O+CN4FVrz%+v&lC5mg4URq{nJ&!pa z!!|;~AWw&(TLCk7Y%3@dsY1i3lVWl%3IWA}0>OGgxxg%{^rW+NgdDiHW_0U4IF=U- zhH$)|lk+r)(W38|);w~QAAu+@j6hlp+q8p=WyCuc6vxY_cjlTp7d&7XID;=j!g1e( z3Tw}~^J98|@C(Yr@RSh2L68I|OuJNrwrRfIW?G4t`i$6$guBIk{vE-S9FiIX8)=(; zp5a+u?!Nm+xtJ9UDP%)LSY!SsW7|Z7ACoFHi~Lvt2(&7q5kizo zu*0c{@a%jU<+Z&MmoToMLhvhS1IhJZD}J44&iMTiqSp= zkdg>r1Z?XUnuc#HU#?vVY=8$}=t3VDmX0W&$&+OZ9H!u)Z&NIQc3i!BC8yoSs&OT4 z>VP|VSry9%e<3)bOz;s=uWtAinvrjD1(v`+x@`IrYfflI>wv|*!ZWl0NBW1m_H!4h z^btX-9s)%L&b9kY%N$S9sxtN0%W#u>eJj8ag=F>}F@*ze_*u#Tq?QgiO54F}cRxk@ z$peMKjpbe({FVM9NifaR_3YnmN4A)KHZg?V9}f zrcqSvClX-BGSqBvCJ(q->gp4jtEgnM++TkM1Jr8f(vd~D@`T8`R1FY zBY2&!_a2J7CA`-dc$X5g=sk;4iH2V_NbA^p-rJN5J`^i%uAa_B`PiPL=(n%iSugf= zaoAX$4!t@{XbE%awnZsOZ>nIR2slD&XWn7>r?SZoCWi5_Dr6V#G62&ZOpWI_TSXzz zsf`R~vuLO}bG}&(Q>($vX=g@)l!dWDRhUvNGvNqjX6BJGP0Ud`0_26&O~9HG*dFIqii?Zsy9_(uFkpGe%WvOwMrjFI zov;1^QDn?{kG&p-UxBzr*P4yx1@kV0NqU6nLU`g_(_szZ_p6AWTM%>_OPL&_hw5?P3 zDpIL$1ygXd%r7T*Fu|?D2~-M-HeuxePqOHS@07N2Wby~PqHZ*6-jzDK4`ObxQ=tcfzY6vfMGD^o6}gid8T#tIo_%syDA zIMFc+3kyvJXYzL{{Kjlo4;C+Y; zCkb5+ADalmXQPQ*T$(U>M!~VNBWy1|WZ`=yO(kWa&*e;Q^gG%J zjUW<=k7=ARw4_g8E$t^i1eC}#JtWJ9yiMZ#&-eM;?R!7<{m94jz#pg5?|k>W)8BpL zo28eY_&l--?uOr*rN(1gaox0c@80RLM;{G5vF1iGT53oae1WC-KBZB#5?j=5gDGaBf#A+@!CuOFy-k)7aSEu7! 
zyH7FMnrRaZ>RLu{oIH&}3DoP?7*NLnVVh$eaj4tQ>}Gn618KG?w$fyC7zyDsECQxh zjBZ&OqUhxP$uMc71yd9?fhR|d4I{TIg`mOgb@H))!kV7NV01ZrH^%7qW?-uDO9{*KZkEREXzh9jfjo!htOInH+xAXU235R@Dai$>>`e{zVe9|WNrhP|{D z_!BU87L<*_GXXzaUUcy0X}Th~>gtXNIpUFXUT$T^sb&6Amd11b-VwsY?&L`2U6^p* zr>$(0jmcvw1W1|7A~m6rl5&v*EMa5%ic&`r@VyR!5mZCrSO~n-N8RCPURjaHwzYbb z5v)IOj?s%VPlZq@#`~c}f@dfvS(TOe$`-2h^get`0$yk@bdnzwpRPWL(lUIV_+bpX zQ%B!V<`6`FJ7{M^+Rr=7uhH^}&?5zbp9i=J7vH75n5n(eNZJWqQzFs0Ha?%-bmgc0 zPMpfYVWA~kY_sx#cXJM?Bk=1Z68hl>qG@b;=AAw(9vY{Jh)W^w^Kz_gSMeeP|nmR|JxsivKFH8LR+R>3f3I-8Th0WXtXif2Qg_V-;p;u zm;ti_gB8ohW7m_{I$P`~Iwy48t5%HQYKmDV}WyZ7H64BVlF=l~+2&nV>AwD!;0Ut6pLfV^Uw5${a&)QASJXo5} zhKXZXL#W%$a(y*?qbmf=IgG$1?3t4Z<_ds(Gf%JMfni7H!mp)_`ii7tn6T9`tO0yD zw#KJq)%lDn?OwPL1j=2rVs+=?2?TlecZM?Jyz&Xt7O-zh6~5nBVfx_GKErCYn44CQ9L7h9NpmI;3Q(Gy(oV^yc!B>b z8U0f_;0ewt1n8+t22;-P0UC5UPCB;o$kN2&n5#`!W*1$;-YO}pK>#f~__t(RP)GQI zC&GJ0wsO}79m%Ut>cGmxda{1q)-v4EU0|~VrhP8JjczWct>Mgi5&C zk48ht*S;6pe9-YITe}zM&Cd!iii&+vh5|ARl4R043_4h}b<$LUiU6mGgdvDj9KZ_& zxC(ae4XJCDfTUxsoZ<4-MR7$xRh-klVAh7UbR=Jj9g848tQE;)dybpm5}{q|OVSSk z8C(U3`lKMPlw*l6xaHkaLkH-*Px*c|y9-+(v%E3=Ie5Gg4|mJ?uP^Z_3VzZK^Uk2^ zqjw~6>1RQb#o_opERQ!s+i)NhTlq5-6w25_8Jx1}F8%aRzWSBW+s5gKKmPIb%(Kr= zFCIJ;`c99?Y%6DIB|6(kA2^-C7d&iATdNZV<7y5~eChDpT_KdNn|6;+QuY+uvujsR zOy0GBZ~cHM%V$H6mt!qLsw{3}N8MOWPM;1RJUgAwJ>$o;YhctURSNUhp6|CpRlZwK ze!wMb61)bm@ojO_54LaL(M14w?@)4BmxAY59XZyYI(@3VTVJ~yem60&P(t2WIW*#` z9-Pk1M7e@`A4Y5c4$~DFflH6;{BTUlOv5YJX2cpIwNmAtoMT{_AjO2XZ;o|yI>}qd zPL$x8$OQ@n2Ch_EF~LZCULDKFl}N2CF-be4Lg!*GUK56Dv|t$`hI=u>=ydi#Sh;fS zpRolqkFd8JB;q|AWoB*sIan?TJc9iEg^I;)XRS=;N}~%~A!D=$_U&2JFM=<~n1#yv zt?{{tz&LyEY?+&p8@Rw5v@OCR_#kK~ zDOGHe#+>)%2rt3|VScVp`=L;VT3L2QJgd|;+g9s5gck(SEz)!-d z#uOjV#ba=-WXcHqQczLk@YmotJp4OA0DkR8krku|V-?8dEC&y6>z6{&rQ>?!{s%w=+ui zJ+Xj@Z0W8OQYjb|B}yT5HB!AQvAKP2cx}hsYP572m;Cl}i;)pb+^c$Wxy_k)`LHF7*osyPe9O zKlb~$zJ7DgS#grAzMI^VmSVRi^kqAD%8d60IHnww>o@-qD`F@@OHsljI&-jH;Lw*M zye~zXUJWjszq2N7w<7!}t9b`)0xZA#nDVNKgSj|@^L$I&tDDgNkg~nk9)k9p)9J(e z_wS#+_O(BqzWT>snT}@z#g|@sIUAV$X!`m${~^or4`+%g3Vfz!)@JpTLCKXU5vOmA zsg~v!OO`|aQbOym-k^CClnH83Be_b)S!PW*+HO6hcZs+{lh!#-<3fwJ}ld-Wf)i zsT9uwtA;5HCS*eKa-78+IgSXo#lL|i!sreYL9A~bIhN7QI8l`O**O?Six>vRsUslf zJz=&?(EK&Pq03=Do7Qis36B8Zo+%NX9(-N75CX?YnB3(}VX-PkJf_N%F>7zmAzV5i zg^B`1;n5)oERLm4r_~WFloH3tVH_$K+imJ(RBIGNSQuqkp`s&hNd1mS!@qWMY+iQ% z`!W)Uvc#O)k$LK5l>){@I1J%31R^0&XWES5bXw-no)#LWS_itoixb$i3ND%I9qrEQ zG^QRhvd?!)7^MTV!F(w}@Cwdh6;XbyF2OfN037(qaFVn_f_j=Vlh=ZOYT|z#f|p^M z7cXonk$VNsDi~?4ndv6GmQm(BeiaAsU}SWQX(ugT4$qk9Ehd%por;O@=EGo`&k7jj zsJp2rqg6ChWg~f{(uE79i+45c1`iZOxTYA^;*j^?q+ECh50npiQFN}Nzu+6J0V{b7 z3zs^(C?u9<3t-W2_U+r-sSeW!PDg$<2hUYy`}LR9^Jw0ea-D@Ki}USydezHh~QZ??`S|xk#Eg!@r+-{OIxz?nKH3TNT0%2 zE=I_%+miEeGOf~s+!n9|>2>;c&?HB53PCbS|4u!stE3JA!&pmSS{>)jm6^6#39lLS zq;;nY`Mx0n-?J^-Swbw=y%9Pn;2;ic;JtlOxW_Ymz)$faPTvQNyr1LWi6`|pZT!N) zgQYnRn*ZaU|M~Q1|K@ActFOE~egFI4pZ@A^|9(1s>g`UIZrrpT^2&?QyEY@}&z-jE zUFsKRR{RDBD^}zRPDVT34DL=(Z@%%?^z3ubw11~#WqK-(!fTc?HL|5Nr0xbyjwvu5 zxIX9XY|4Rp`_k968Rc{C@1sT4d(auHq``rGj4T{D6&{Q4;K9X9l)oy6Xdrb+%F+%6 z_-^>!#=y^1LKulx+FSz#rITO38i95#Bl2}|h>AzPdAn8VgzGapK*R*4(Wt2mdvn-6 zdRG(>qw&)jonY2Xz^~*q$BPl5h}A`_#?X$02~$uo!C6=`+n2$oGnl4X9l?sRp37G0 zR-ss?NUL9yEv8vn#=%?*!I(Vv-Lt2r+2b)&asoAXYkBb6RV$`f-#l8FHiem(2e;RV zvM_f`f0^5LjvZM^up#hPNl9Ev6Q{z95e5Xf5h#BQ%!2Ul2$-PS7}I>}ek>NBIv)m) za3m0@(h_7w+($ElBjoJ`(bYKN68G%f5&YcfNO)T;9UYk`t-Q{kx4Onw?TfuCVv#Yu zv@<7YQcx&)>be|{LkvxawtFU6;fWw~r@R<(5MY!b@M^;&*|?1WrUkk%UvI=}O??LC;u3enAp}x(^42!qg$-87bqXv>iF^oSN)F{0 zUOdAuI$^bdzu9ual#G^?Auq6+hCH7UB*DXu@pTe4;554CBl@SF)a}& zMAN_%xbcg_hu@rz5SW3bidf*FfL@6Oz$wXHQ4_wQ4D=gQNBCJ3m-HXjhJhQ_x8$48 
zOPN9nk}@Ge$>h6}>C4dmF06s~dgmVp zPVQ7dN*l!;xNcwal-6B+Y7I@l{@(Y0P)nOB)dL6iO%KP<c5={V-c7pzXRu_CvvGo>0nT}24?QkLn#G4;Lh4-4GPrHLpzl)DD)rH<+cH?Hs)mtYgR(o=9YDZ@ipWjssgyNdn1;BFx8bD^Wu|`oy1lfCS=NmAI0W$%54$c z+oEjj$;Mvx`q-Xb8|}|wFAyD^uwYfNKY{6j3vqhll|2~#Fu`Fk7(t!p!aR(=2xx?< zlOT1%&j<|DrG!*apVWDPlyBt^(0g%7wTqI z18(>@n?uJc#F8JOradrOqY8q@e5%oPnRCjA15BSHs7%a8{hKEFD~sc*Jgu4_Oh-M3 zVDSu}gq^b^D(sR+3a+rRJ$>;XrVXdkV#pMyT0t=Lgt_jR#jW3g`Oaq}*4wPauXo1v ztXzl{w}LHo@X87Zf`2e?%EqvMC>tIzd^j@7B4EI3^jAhE*WeM(+*vpXbZ}yr#b4sW zmETUn6>n9hZ+zhDjW~nJd@sEk7_M`@)t_387(a64+;}fI4`$KC~bmI+!otQk~Kj zMF!2GdvRGGSyN`^2`#oig>DGDL2qN)qE6=l#J{$6;D9eM3h*PXZ+#App@ZV5wd(74 z{L$VlOLLmmJ@Cr2WoU;*eiCYP<*2vcrq)f#Gmzn;9~aN2qrfZzrC&3xrqd$}kQXp6#IWhQ+G zEsx*mF6s3rm7UvlzC%_580QOKP|@A+yN!XLp@d|EtMxahhxTrn_H5f2htjpvJu&Iu z7e~>(wswwEV7qds(OL$&A$}lRGj*|fT^u_z0$&*-!Vu2H{Cqad_d@0=&%|PJB^`>O zG?GMw1PDtBrl=e8C$UQ^rr*(DUJ|ypKVO@8tMeVb5=t12G&+jVJd$TvOGm=yz(p|X zvkg0Dt0RKR`5B3Ni@|b6G^N51flmcRWOo#+-Pv`O?+XFXw5ekdC=%<4j?N|shJVLc z$l_09*+1h@A#f}_w}jPhrsZbS-yqaI_A3Jh3#oQ}}En&~l2XG{ZR+9)_A6bVfV z$WZ=5E`hz(nt@CGDkze9?|H8r*UUfirR~)ui+E+o!Fd%43>vd5qc%#yFz*m-c@OTH zxz8qF`6;IYByFM5;jeLnCuIZdah>J2(kX?N^0yp>(N-{A`W~77B&-pOtRPABD0) zDZe{v3_QwKAKz+;$g^_Mi+8RnpJ`|B#_s_eSkzUX%FJVxky(+E-*}(+$yW=4u@^)n zjN52;`1Ngm=Q~*7VqD(b5t0SEkY-G6rM~Ku`Vo}qp-QO?laJpstninli}Io*<(ZPF zZHl)DRApbu^QkCaZ)8g2wc{tJ*W%cI7;cf zMZPblYeG*3aV)ERJt&W3P!PY2C=mQSPG+lu=U;nk`qq!0oBr3{c9VV+-aBY(v{@jU8+o*A zx@Nb7r=SGBOW}?9N0(E-;~t(FzF+^G@WS2=cp4{`#aM@C{Jaw%?|g!-VC|hR?>_y` zVZbDD2rbkV^Ll)+!noq$zj{NRFZpyh0M63+jWOFZb=N61!h<#7e3T5U3S5K~#(}v~ z4$h?mAs_ROM%>vkXj;uPKb;!^n|@iFvX}-t+D)3(DZ{2=6avfPOchj%Pe$_R9EbZ5 z$nthROg-fDyUvKY8;#Pw!L%XDle)1YFq`^z_I#FlW#q&Ug|)+0@@rPcYLJd<)VMl| zl(JY^FyGxfxA)*WjBrPm#2z`8^bt1rNupJeG7VKm5usD&9|mWMosk;mA>XJSJxt0- zbyxgD1fxyf?a8|EQ+6;4ak_FOr>$b{n6%E$0gQsOBK5HR(CMl}AP=Xp**b77Y0f0B z5qO1dn2UTcLBbU?$J{Es0vll@e*%F}yC-EB9b**IQL4%=dL=~E{b&& z9Y9~WFq}7Q(MVkVLST8aiPqJ?D^HgxzOg665U6knHdYeCQF+>jfK!Ih5q#RUc_gkh zz2uqS6d3g_UcsNVUC5w6gf6wsZG0<(?-L71oW4^5^MyEK(Dnx zQAR?J=dzCeh~5AIKmbWZK~z%e+NB7|@E(28wgRjRXR{I9;r!m19nIEd%6UujxS4v_ zLJ&Bv$0>wR)*gH)Znp2mLJ;2s^cF;B8F1==T#Cg7jY6Q1dW@dnxBkgmH(*zvTUXO2 zPTs+XxM|HiOPzMkzsr};Pp`f9diloQdv;G>{Osqa1AF#OhhBaq(<8@o9h+W#2M{crA{pt2V`F zFZI&LFK57_TBwE%LUmQN^+8K6^;FedrtKRyiQvZXIW*qlp}$Kea=#nyV&I(^7?zL_ zTKaX@)*Auu$^mhvV79~o=|H@sO}}i6=Dg0-Om-h#n~sTRU@DFkXX&s)<#IG*PNLUz zwv=Oa2myz=G2i*k)QBjJ*7=Xb6Pu?Zw)SgL3mg<>QvMvbwurtIInBUYa{nFUVebrh9~`FOx)_`g9P%*oB=9jf!dgeq&njT|+_Q6x(36%RVL6db-h@4-OnH;< zj5n(%Wg7t)74Q9;RArzkhbc~#avH<7WK%hJf3zR%KQ4TAQ=>xwBA0v3yEDT(g z17hKyEdT@rrB47mn2MXgII+1q>uvyP-uO35T(-*9e z;()FWb_`yXK0hy?HS%AZHs}=v(X;)ESZ(ncd+AisBP3H*md_QJGf0Ys^m?qo>vnCM zE@TvTmL)UowG@7FGyKqs85^-JCGXZZiH8+r@Wz4bfsZO7fwuz&eFc*8JOcc=79lu- zbN%t{9ESKW&pbPQ?|VP!UKt1W?wdaL(T`5|AGj|T;tf6H>OfBN#!Faz4E!zxcgM4w z_U%}Ho_YS@^z^gOO)tE7s4#xwV^8z|!^a-DciOq@o+w-^r_tI{D zk^UVJp*~%yZ8vDi&tXkz-zez$(}=z;>aw^?Q^J|+_eZWjdSKb@$YbgER*Gm&KE|whCt6|*S+JX&rKIHx?uh{6`*r<6qYhC?wRoG}F^bIjY;(c-g0uyPQXMxYc3WjQB-NfJ-n8Thu^ zx;j|rs7c7{@MT7+2gg}{Dmt{CNeKx`i~KM+jE<>XIt-iQK(V1%=wL%=3;}M+N1)hj z%=t8?0D(Y$zmGWhu@13RTnk(T28Ju|nekLXN}l3iF03E!6;OExUj#ud3V}hL<-^)T z$$(c11|cPljhUp^xp(i118d?FA~@*Tg@wpBZ9-|&7VruFnIW()IXxD$@8~WVls_iw z-RxM&xzd4WR!`J2l5*ji)d39hrHoYQEqLm1fTWZR0X*4_1qBR!lf2j^E~LAjY-Z~6G# zjDOqOSc7lIC*(rtY-QpvWxu1P43v7-P1>o-F*?e>Tfgt9_n;Ydf%{x;>FF1qpLXB7e|qGxM>_am-Psc> z$tOSdvFYCZ`>IIP;&b-&bSYCdr!xI?He0tp_ri12PoI8fdgi%R6=ym7&<;Q^BJtCr-ZkU)(PRz;5&wn6+NpH}!1|Q-_k7 z3N1qW#-eWXyI=GURd#Vn?PZb~<@BdwtmH)Us9L4YMH#(GpYg z`R=z6vO{#~fS5#?Y>3ea3qg(%?C6{X+w81|QLM{IWvnz{<`3B>PktP%Aj%>zF+;&V 
z0F(wwfwHQcM2Rt-!pgx~Vk*UVjL2p^lpOCd8;VHx4T++mhY`ITVEZzKmEn#S+>^xfCOC z`t3M&X~7^J=4lz5^CQ$9e1sC`Nq94rW%@@vBSA}*qjh0*gdWQZ#RD9a z3GE?hH*lykfhR8`cS1*;;xYBKbT<_*V>jxo08Bpe8zXhOg*bv=J6)BwfHxPI34RI)tDATNCWvkcu315p z5Adq=h@&2fFO70O z?{NVOS zUsUr}y~P}Kr2kU-?N{{TOD|1-^Nnw0Po-nip;umsG7z7%@D9Jf@lW5%ikR1@#~yoZ zdgS3prWX$#n*NW!_=|2G@c2hQJbmKhPfU+J^60ex!2W4lwn{h`{&4zuHZ#lFTlV5O z`0DGiu6%cT@%d-7uwcvd>5qSO`r;pae){N#KN23iu}3_dIej8922XGq{}fw07FdH|^KZn@0WRgH(5wkY65ezila&w`^SZ zF?k9Ffr|bK8q*0B8cR?qBv!Zw?IjzuXTI@JrW!1ScU zB-|+m>H}_qo5cX*b7HMc(}dlbmz_tJm?=d_d3GN(;uHV%!*7*YV}5)zOra296gkY_ zv@j7wQ|%e3hm++%oPBOt_QMY4y2y0ZikY!x<%cQoc&uzLq*m^dZD zg&|WKC@h4Y{4DLIa2X+njaP`Su>G~T5Duc`OTen1cJ9CYCV+4)0)=l*8FTPRU|$Wr zsB_R<#zv?RVBl}O+&vG!jZA5T4&^_Ogtd)$=F(GW%ZDRy3?Zov14&^Taq{;Tz4#^} z`Wb?BjQnSXg`gjC0Qj$i)Mx@NN-rPys%0rKP!vtE%$6kWN}-oV914K;2Gf{cb7HQI z#D)@uhK%l65mgiZtMCIQFThz#Uh1I?^%k^S@h;;cmjW^busEHRfcksGEy#uQ#o)nD_(HonWYcz{YK|w+G?ztu@8uY=+>lO>)=5Bl3yxQ zh5d4@9(+N@N|>y8iUs9{eitYAm6XU>2$s9o#=uJwwtRITP|heHbyi>hZ-+adhXPVs zPlOBc@1Q8yE%cc~FHX8pOaH(r<)mwZP}JX^8#b1 z+k>l+aGpoIt+1pT9|-=_Xl={Z7t#ERLCU*I$ghaJ-=bv8ySHt8a?jQc(>4l294{@k z^oZ>;;R5*v&FfD=~6Em%WYY>X=HXAdIu`?@A zj7Y3P!MM%IQUuOtw0)^gn(>QB9uyu9e;7OK!Ht+nDI@YF;B;En3=D2u*#$Ds=d;|^ z>7o=4QzjU=zF~tk8$<2dnfcS?>!2@!zyW21R?VwnM2WM zglU@DiZU}?^kKwFc__@mTa}9BH=gk&LP>6EhrCyRZCfQIzl93jz(YvSR-!z%9gJ>g z;aZAI8QMpf+ut+Al2BBxw8|1ZBSMxk!l?Kx6^S?CVgX97ZEtNp@(7&;AMokUA=z0k z;8Xm-0oi4FaD-XP*9KLHbMFWp4J0xP;w-?`#pqmM(XxS_eO%NXp2t$}tTqd*tSJH^ z32*AFJ@g&%!J$m}Qxol21!Pfo^{(;}S^|fEEk6qCtUQ7ry;4B@t^y)&^~yVN$Olf8 zRs6{-?RslEXrQ=x!aF#jR+;YdG{W~=`REkv-cuHP58kEKx7tst@C@2k2ch4SiF_$! z6pWUUZ_>eow)EtG>z2Bzo0G3sq5CNAj!#&f-&V<(0;Y(lDbXD)9$Q>{OqScHGS=?e>8pQLyyMM{cNqXV8Aa}V;%coe~|!|rOi>?(&q53 zVFy3JsmJqZy`cD)2tnyxCFGaD*Kcdoz zQZvhIO~G7D2RIXE>9=KKgf9W8Q(=r7;!9#na~2Lv4c{<=?gn2W^szPiSca(c5#UC) z7!cy798ezWl-PIjHVqJ_lK6z1sT|ggDi(Ry)isHOiENGaW>5SiB1)!vqU`V&0E4|p z#(o5O(br>~fBwzIa z=bFHHG#iyY@VuuvEyju2}_>c$h2&i^4(>>73mp zsW=%Hnp#5g-c%Ed5uxQkwc#@{m@y?tnd*j#$eTc=h!6@@mhz6mNa@tBEGUHC=2)WO ztXqT|&8BQ=2q<+bqfLB_d6qk~{}p0+Iocw3Whl>G9mT07C2atC-lL_8WQ3Ie>gpYVs0fNjEdad}%&|K+ zo^~F2qC6v_U-|*^5xrU zm;hNAJ?9_zhd>*GPa5?Y3dm5R8b9yhb5tj_Zc_y^ci}QKRO9nst)x2btFyl&p9r5xq(Z$tw!8FYO zW143uSE0GQCol~#=62!xc7rmXDath|hhNCr)PJpgK@yh|l1aVn5fY^(7LOHauN#?) zSRHHWa`?+~@=C@7xd8*5a3O%Lmb}nCocWEW{7$2_-S~7-NUJQi%|dS~BP1KgDQGx! z3wMu4SvdH@3uEbU=p%DQQ&!=IKMzNJQ?#pAubY1K)DN?V$oHnlKKR)5#V>pzaAg~T z>`J;WXLy~9_4U|^Bh&NGM>%=w>FLm`ZDH$^j##CG4aqf@YEdba70H5VG})$h!^Zgp(eurZqTuTD>ArYzCtor{ltfK=6PYv)z`R zObI+foZynTG=u=W68c~rLTdLW|-|@YNl~a$?y{yf>?d$?*s=WM0#7B z!$0N4)K6NhFo0iP>aNbLLj)W6%O_HnHp;Dx5*(PR&^Aw6(+TngKeIX*(13iUopIYl zXeI9kv(7{EQeMs9@~i+Ej=EqY^wtC}!WxY7B*3H*;8GATMdtfr_0%FR0bL~{c*n@a z(Z;i{%djSx27snOo%G_Uqjw&Zg>Kv{6oU-yN3n)u!j*usF4TD&KZwvjlAU&~8=Ye#rgPTL&(x2{?6wGu6(N>(x)LOpE& z?}2^be$ZQ|Q{Zxe-?_f(EDe7fTsiDg>LSa{@(sgj8T>l{K zb_4EphS@pjAx&Xj&0zY`_sp+S2j&&I0&K*0a&n552Kk||Cc=a{t zxE#E-y+RTzqtLO8AZEO^{3G|7Mq24#@Q*a|{h8OI6;R=C>bl)Ke?b86M*Fofu>8dj z-20@{a92bFx6>)>8lrJt3bV3hwx=STZS-Yt5KNWeuuk-yWC;i(JU$kDJi6RAo%>wyho$PU4&PFD0vuB&)WHl^c*&oZVL7Kx z2(Aja#L?*q)C!{Hi}7rUVbo@YF`o0v-M^Ou*C>9xTBb{J}F~MPAlfv z3nt8MO_Uwe1t&6vGf$1J)6Rh#9l-UhjA;z*Wkg#4lYE2b#!kMo zfN;lzOxuYJhq(* zmxMml0Srd>Lz!vvzW1$)N694hO@6Hdp_V$#p+A#xnRv=j4yA!b$U6ay!xF_Z!g47- zm`AWN6{XL?XhX;kp((XhS%i5CZRPO+&)~-!v;oIzQ%&-r{GdnW!2{*PR37?Ou0AA4 zk1q5N=_np^-yhW@kb<++Mf;<>@}%T}whK-`sxlZsXbNT}YgPCTC+$wXB>nolqog}1 z(>CmOK)o8~cj7`e?hDUxX3SD})*4HHGuT<50gWk{AaX%J!2)zAuR%i|wLxC>K7RLI zp2eB}jf9>?`?tQqi8|qj$B!SI?B#L!!r3kq*q|ThScfgy6Xn>M3)5R?V#Nvlt%!e! 
zOoD>6Q+_*eFQq%&?&a(yqM3jHbDz&3VPlkpVTp!&PVDw#ve6pee>M)Q$4;I|x+Bxe zuf7t$wd_KhDcm*jW!k%ce-F~r_uf8oWIFibp&X?5YR~1mFW)}>$={pqzxUqoyZG{j zw$7e8mHV(x@m&deXlVH#Ao*><55RtbAl}bZJ{Iro@+v3z9(TWgy-S<8?uP$%7+C(~ zgL|LUnNcjpszX$0SHm7_&yC=HV|Hih*qkXF3I{(8N{guq&t2(~79{Wy2A>Rv-ccC( zJp{=FXp=B;<`I#OA>P(lJy=WTDFpr;md(vhh{%KWW8a?LRX}uFL~4ZU067k)80N`1 zQk~86wDTA2Y#D(U#o|QfRS9Cu{&Ws>I+KoeEjuF$tOvVTO~NQKeslR9l_!6&8cElG zfuIc&Wyz6`PQuJj>s6Pt=CeNw?=fy+n0v^^R zWpX-I&OC*qA7j}3Q^4DOI54;c-r++#NyD-O4t@n!gO`q|^BxoTz80ClY;Od@K^xg0 zrF$imdxROMy*LLGa1<|!sk}X7tjZ|-p%K;r4uRf_GYeP>1W$WHN;)`%Byk!7drkL= zR{=#3g{fk!VD?U)M%E);{)VYlpi?UH-TRup38uttyij3pemtAPEd2QZmGag8i)f8I+F`VXcK3a}qnM&x>fkHZYy5OzixfHuWU5(k9>% zh#LJ;`O?mD;l@8Wm)2%liCSF2C!VFBCDL>#Zk^W0QoK4&=hxA(wkti2_EBz?rNCLMntT5FKi6H^16H-^*CJTG z2yA^5U!ag(jNi=R_=NoEsh>7bnwhG1`vc6FS;Vr|-$L8C{G+ z;xfugZnS)(F-l82Wm=oXfDZ*>Zxu)WmpKyb1iYn-rZsFKZj}o`gJF8#X@PWJqpn&& zq9mM-a$#g(yKHBRV^u&dkms zbMS%z49CsE7%_JYSvkjZlIQ8g*4+TGCHlEI%Nns^N}YNMTm&daF%*;`lrT-xGacO~ zKb94%U-UJH#<3EBg--ziQl%jH98)aj*#}dHAHuuaxMNUBKLknGUVARQn1*5PiSI}h z4^F{l9*J)s7VRrvI8--1lXAl%Vw5Sr3bmAP%4j|%mO?n%HVh|;Yi>_k`Jgd^vQq+K z=!8s#Z{B;?vXU3vNJ|hAsoqma)WaY6Auxu;fS^i*yvNk5h+uYJHG#6nb8W+7_jxEW z${%6iMw#l#+EF1LWkR0vhC6vKvQ09-T**IwDOao`eONrpLy&G*9l)E1{2z53_4Gnq zWq426DtEvt&f;C(qgx8dsN+aC6uwc`oTlZrwB*~oN10-YPXU+(J)94~)nv34dV&w} zD6QICpO7A$EP0k3`-Vp3rT?(JQ9M@ z0*s|dA08`fhSdcea5W2jbcyzrZRxYJC?TvNl#l?^1L;j`oW3T2lt)9J1}|c@>}S0n`yLlU*tV2!^Xm*t98DfpjDzb(RxM z3{hK6W2B9V>hv621)GCmWTtzpzH!hO2C#YamX2i5s5BTE24iK5(GO?IPBlcJdcP@U z@87>C$CqzTM~THD(;ikdILVSV1v6AnmJULe(C;Wc@2tTG8|#UcKla|xnH^?_$-z;r zBj$t?lm}*l0%K&|U@^jsST#80%A2BLbU|T)C-4$*n5EGOs{_9bIAUT|mip=nMoFi# zF`RrTdpf={>?OeRW0~`;+*DBrjwy2R(5Q$d7PN5qTvG<8)WnldSf6?Ozz+T8R{h@td=MbB(Up7MYf z!bzRM3m4)M;FOMGcFw!Rhj%c@ch)7~zdj^MBXiF~=nY!C^BpXUG%Hj2)%IaI(ykTu zDFYk?tauR}g*~sD*67>!VF5AR8w#KF0yx1yL17hv3(s?!)^$-|Wq1!Jac+kewtxdH zl!xS1n5bz4`%qFwU2ik==293FDG%};S^F~bUA$<2@Lk(#KXj_EfE!$3?>#tQ8(LYP zfx_6sWZ;K#U{_IXSD~A{)JZ>UUkh!v-GUqW%=pkJ=6Zu4?&=d1_@oDm6<+GB&r$XT zanTF+DV1shH?Hf?${R@sdioi(x35OwxN$WQCTXIV->Cz<2HI2r zuC3~`0`YuL|E7@OUwiiMnfC49ALVdO{Y-usnAPjkpZe7F$xnQur~U$NQ}_yH^IQgL zL0zWE#?)*3Nb*N`sZ+Grm6Jyz^Q5l;EuiQD*z z16Ht%KV!OTR=B{_?}?}F?@V*6j*=>C5ehHZn=hOUYm+L;clOr^Ahp2hSIS|@8bV)M zqG(v+*2MzC-$Fm~VN5-ZDHZX-R10d_3Y{sVbQqX?V-Zt-I3}F6EqMJxyCkqg@F^X(%Xo5IlFvF!h2P3MnCv4}cYYa9D+N+pe$Zs+e)|vVPBpB^mUv zy7jSqQc#vyX~3zj+I!LV$>>&{2Bu+iapg8+A3abyOxu*sLtEA4pVPRFw(ck{OG z^_wxxMiJ50s-o*$4czTVYSg}^jaXYRXFzmvdgINvrb92iFm29`zYo0s{nMU3d*WO2 z+Vr3Q?(e#y?~|YSy*RRdIQ=_3A#L)|Baciwv&-w;?kv&X4~idHqd1n9XCwxk+Zb&N z3{?01fVb0}g>UilSGXze9Yy{MdE8C%%VA*o6A$ir5_7a`ppjLHE}g?+X#~H9#Q=;b z2vyT8h-FJG1iP{u-_C4^W6shD+H?lO(b3210YZ9Azi{r{mlZ7hK76+u>&|VPI$h(p z-BQJM_RO)Ic1ii*FsWni4N*HA0#h;SAtc|*Tqhx2hf$-K)Z28)3x{5fsqs|S+P|G| z5wIE6@U4&*)tWkx#s*USEC?|ppyQdMxsZEDs>zMe;lIN1wTE=6SCE6*QXp714j(x> zrm%u93{HN^z+5m<_ZfGXJhLVI*zJ{4Cm(aKXERFS7%RT=j3PJ$>z2x+7@SKSRu#;E z1&F^4rb#%fQ=JD>=8i16!*GsAaWieAt+b6%XBm6qjQi-gaBk%f#$JXUI=CDknv;uN zT;T@3#WTVM^LEa-7`FnGS0D~0CFf*4%|^p0!aQZrbUt#4&{pt z$csQhf8&Dx#0mabaG6ii4R}>$Yc==x;JJKj>2tV=F zou3nY%yrd1+L)yP?uFZFXSlrt5C5+*MMA-(2v7)SMM4|#XQCA12^6iiYub%~-yG$h z0-{H$b5WWaohp1Sclau--RD9vEe zHu}q4S8=N3G-m$fJ6ck{0gb8MTI&*zf~~&b1sjf*i0x13)5pm0H8wlLAJYF)p(-CY zf`=rjA_j}_3l4Sgeqc83)CSL=ek;mbch*HX1qZmbAwKitSPuT;-~C@%(YHGSeE0O- z@Bc73SUdgj*%zmOIr#GQ2T?jc`NZSXs}byf{q?U;&%J(P`s$azIBi+GJnhY3i52$H ztFKoHxNtQCtW+SYWV@T`(uycp*-oL$l2hKg@Ke*x-}vU=cPi?C|KI-CY*Kb_tt6}_ z+V=VlJ3|k+)rEWj=b&%O-9niPZbxN@_4eqoBiTePQ%b4dgAYA8J@oKH)AI)pMH%_q z%9T%i;^WiaSWR9$crZNr*!187_h+m5y*c|P8(IdgD)hnit9om)W#k;`t 
z?w~zqx_|e#pZ_eMaov&o&yRlh%Wo6|%O5(h2M{4JQFZ9=ed*HJAxuX z>EOg^D_=A)m>|b9arq&{Y^q}?zjnFn0z`!{ZcGn8;F5CSoj`#qGfzE@+Led-+p}X9 zj07{k6;7#p!dNH~ge3nAW@OA%dt6?upzuz5_#;s4A)*e_QxxoEOK~(u3%BqVaG=+@50wrU_)@lIr0~glS>-GB zww$$A)zDiCr~~V8d>zV10E1kqb3+?<+|fYu_j=Hq{NWKCaLR$*AVQreUO#^7r_Bm3(e)-a;KK04Kv|;+mPkuT*lO?nJ_wDQS%meow zm|lDRjp@kIqrpq+s$Ei6!}ZY6&EP0~GDI<5$sQj|p+8F2!9y=j-;cuc=%bHJfBe<2 z#B#cMdOi8D%nmsq4};3kRdBQ*OI2!ER6lJ!Uf}sFSFXvD-=otjufCjt%t^&rIYdjVGg47`CjmzVFp`cZ_0v|Hk#pHzBe*}voK-M{ZW29`gxf5(#)0z2^; zp-==c7aiNQivylaKlE&u5Q?_KtaK<0U~LFwLzdSP@R-e|ID`_~lnR8b({b`V8MEWj zSQ{crD_l)`T!?Q;l=SSzmkrILL@<*MLI`1m&@f2Cy2?g6EDMIM(FFq7^ajUK-zfyg zj-LwsjV;9~AiH*MPaH~w>5UapM245Skq7||UIJC`s$fihEF?z%*y|8X`JC^VG7a5Q zWz0i5!U254f|Pn@6or9evT$g`fbmm8HpI^bqxKymg(KzY1ok1pq#P8*LZB?mfhiI? z6h^8&X2rmWT|VGb7lKEARuZvjVGa}+rvO)hh(g4|G&|?kLXtA2b&95Tr1(ik`0JL0 zu)c_KgGYI_TI89+Hf@L+Fl?t`Vy^7&pb#Z!p0LVJGBO=?u*3g66cz5Kgmz!jMv>1=Et=l^3PuS+Eab zp)H0rM|?^Gm}b^hmKHi2!YuhHE8mA@WhfZ4QbS-<7=3raW$TbUjfAy@X$7EoA5v>M z!1PW~p9OFt=Ff7ur1KKo!0;@UubeXR;f@|)?? zd+-wY6ielx0bn)X1#DePUwFrqk_hTL3tsipe#%29@=>R`9_nP}l<7!iQYJ3O*%Ix7 z*%ZmVvI?CVV8|P6>ZTvh%9J$vCR*3G^gD6kZ*|%MQsG58>PZ1+oq_aona*H+rQVsM zU8wmj+H`FO%0^eQ7%U${Uddj<1L!9Ngg(7A0)YwKdjP zR&lgwuOyqsIS}L2^fF4t5fmQ_;mm@#spv{V_`Bmb~3j7@B z?)|;R!18AwyZ6a5?;zjZQdu3{-ToegB7Xr)T?aA;yOeVR_%m$Gl+3zJqcF1*+8FA^ zOIe8$rh>>RHQo^hMim@J?W=)FD9eUlE@hyB^Kqc0jOZLzpBT~T%$^a7Gc{N}Ff;;| z&%$7`12zm_{+0w=E;xKEhW`U*hVf8R%n_f?3KuYAmXxh-FCPNt7r`3BSrfxGyQB^VK5Ee*evDlC|NE`o=`M=20>Cw_gZH1PQLS$M@M$~&N87*2OX$iU)~`Qi61Ye zAMG-?!L7&$HQ%eizz^i)zcvK3GUw2Dc^>_VkROav9pG~)3c+t_gOU>#3UB#7FdwF$ zHY7mddyb!Ix5A(uI2dWAr!ac2O<3LZRhB4q(*8#L+JW%)y<37OJz;Ok4=uo@`V1jI z6qx4rjPz~y1x{uwFnp;KIyKel{z|4)-pooByuuP#yutm^N3%70 zg!Q-o>4($N6Gx`6{Na~#vhB&~R5mE%-$nSJjq+miFsm3Jek|662;1w9beyUm94GZe5FaZI~zomqKsHj~}hS9lnGv zPh{CItJC|kb^M1u@PXpV*v0#uf&1KSR$3r0xce z`~tgsEIyPyOIC+pbChNA?SL`uPmZ8xO5LsYi)N{vQo&pX9OLO$74H6c&oQw4@dtK4 zxgkOVBeLTuA?$Qo@7yUM{51$NaR~W)vjSyH*3PfWQr&Cm9mh|EU}EXeS(q%%_3HE* zodO~l(Q0mtiUXMjU!zk;JI9cW`Y*DKBoFCm)L1d0%`ue&C+6Z=XT~HDjDP{Gip8VC zIAyau_~dMZI??7E0br%c*|WAjAC?2Z@s0JFT1rnVZ1y&>(uY9D{K(=>ssE-B_0@D@ z8~F&^;yDinfdIu&#di_Lgmp(>G<6IxFnf>r6ZTdSjAyc54nTQiWzAz49ZPVTxChUX5S255M1J`GXdpZ zTUJS5coszv$C;* z`344=ftxh~GZDmrXSk__NIRu|%4YRil}+gU1_Nbp7UX{u=FVFIS)VhU{5eUp4CCxXgU4(S}Z-6vds9}jl?yG$#wqXxzcL~T*+f> z@;B%|#gyjJxPCBS>W3$|5BW3(d38|MpD81$kowGZ(yn*Idyj$TkKVWINywjt458sr zIhGKeb65#zml)T^j!v8#!+G>*2>o1`8!f$~yHFg4XZmHVGRc%j8kZJ}$r$12oc6CU z1>+z*)(^*Ln|IWCFeP!096iycs2B(4i{Lq$4y(do=0-_IPSUuHbTIgC9pBOQQmikW zI5$L5I@L?3po9<*Rfy6#jd&2Wj$u?_8pZUEshsu6$9u41D!X=UF9XpuoZT=@qsblJ z>})6{98?KvI0KitbT}?^PhOo`!2HrqPN2N_fU%a@CJk!~MZ>b) z)lsxKyn&e_XAc$%2bc*MmJSS=g#*5HHkKkCAD(qg3ZBl{E6*dnHo4IyxOTfGFu=(q z3Jjb`E53h(qd9PC1x%M!i!uqeftR6hRgfeOW-rRM@3FLKFPb!CRFr{bqsR0mlr(z&T~AOf>l@Y*xMp{3EV(MuRMi z6hbh;Gm|)a1HXP@S6p$?hAB;c8kBoenpR%GFXdPo@#gl<>(;4CUEWvu2+Z069TYz) zQ-AeLsOp>g^sFQ)r@jz@35?+Njp<)LruJBk&=^W7S_o|Vy$#dw*w^2DdwTQ8@i>4!GVRXP^J}lYHocWItN!Ru z{xqA4?apRpFQ$D@#lbcQk5V8)M^IX8AjKr~sSBV_bg4~C8&M|T%CyO=uf3jW;d9fc zKKXmy0v;cAnBJ9a(Y`vXitLhV1GOY=KFKDTx5?i5$GgNSy8KKz7m~02iA;a7lG+f> z^vXv*{NW7nwgv7eET>Mit(KNYKLDHS`jsf3;T@*YE{D!81-Gm%mvhSMG<0GL2RaI^-m-wOj0hU@TpHPN z_eZ0tQbf`t%NWd=ZVdC=5-SNxV--0YA%ltQoCK)Ta|v*r_F{ zDV3x%?Lt9twggKA82D*mAQT9zK(=MMF=yE`QI=@(lr*rg)EMP36DBI@S$$b z{^3kpp_Mok8s+F%7(HgVDars}9{aM$%iJrt;RFX(G@9uCCCy_v*^Ut*v-U8Z6C#9$ z4lWSb>HzYV7sbj5)2L(!lE9en+VM-~Y?ropsrDWcgt~U;@OHm@_fYBLU81u~?(3Ymj8QZRtg8NsoT_XTVqSgO|PR zrR6u<>eZ6$R=28IMOGDABmrUuG837|L}H#t{eL^|Nr2!`Mb^;EZXEI=?>#3@#Eu<1 z_Ws9?9TDi2r{`6Ey%SpG?|J?f4Fixue*1e2u&YfYFbcibqX8y)OYFXOkPo-?@@%f9 
z&SV~_q=*Aost=NpHR*f$fe|gQhvx-CL*vqTK7eNJsBiHI$d~*Am*l35zWOP?_!F=Z zS*&OIt$p-0{jdOa@@*hX8vw;^X>9?tqysq6gU@QOpZCGiz^=WYot=>kz z(z)9_p9)3WR6_9t>%KB692=K!a``9CZhr07UnAe*hsK&gyIV3y4(vd(X1^ z(3Xsv?4{m3VfPaVs^8el$8!F&0T=JS_kQUnefU?u`Jm#x7(4rceEVB(W*X#h1~IF1 zitR7EALj5mU}Ah?EFj`^_x!ITn7bJktI98 zNVQTrgMp`Zq}OJ%ZT#uf+jjGNcJ1!o$g*BCFrB&#WhYnVmcb1b0oqgy`im7ATBWZiT1-06+jqL_t)b2;R~G&(B`z#?EH$ zFYZB#nqeKzrbZ+2Mp)9D#N~>+7K45-fWXN8Qg+k30JH@8-ikoD$D6S-8k7YGuV zTgzf2!Q8;+qj_n3E@T?Vv=fDCM8^24f3KW%VsQ^vqA;LFnbJ{a6rGKQY^3HLJ;dlU zx+}k<5Mwy%WZ)=NKuDft#PY6Vnql~X99J3N@C_)VF!6a=cx1{;zIa~r zq$9c*B%QWA7AX&nz@9Q0rgY!9%5%m~(z)E@#ZpK4Q=}AIewGo?!P`b{F>LuxG4qZ{ zfF=su6i|Vcw2O=9N%@R6MOOwU^>*>txOxNOJHw4mc}ib-#+Xo2WlT~=Gm>p(Qpv=? z$dJQQQZjNczXe+7)m%(g)NS5JHy*ecFYQ#1^IP4~nkqMbY1gE$OGA24|M=bml?I}u zQ@;GFZ7V6o>6RDsy$4Kvo&!3RJ9xEtP+$ceAzl*w08mggZ-Ss-Afk=*TY0N9Z78S@ z=)08V;6}mt2Y{#`aICG%BNBOVNE!Jb8Cg>s{qYXCv;q3j(*rfer1%+m1VE&5kyGvM zZ-FZ16*vJV=T}f7o~S=rV8qabcK68(501RahGnw~20+LdfB_J=$1~8{E`4F}z4c4+ zF5;yEPC_$)gBCldlBG7{jou5*>JD@RPrMo4(Z_plru@Yl6nN2A;eW|h?z@cU^;I5T zXRe%^nd#2vfVdMUPj;uq&UB~8$7`oi8!(z=-`-|GW5l>iYZuc9 z7bR$~4bkM3h(ncL*uO84)vIDi#UaeAGI%;RZQPLj0)DcD40vMXtsc3VQHB*Jc3P!S z>JvlS_UMR$3{wO`F_aXn<;o0&rN@^ORA$tO*76KQF;Iq-s35=LfDj``e4!AXe*^3R z06ZFutL3=B3y%uJfG4(rr)<@=I(Ya^aqF|a^ zkXC)rBwuOpTi$qJDn(n$NZF0PrgW2a)g2!gGhjnq7Tn0&;C*w}N|Co``BRs2irl@thyU6gh){3S1!cg~hWMwfIxH>k zYHx-c=olFpsrpv{YWj)#>)(n%7al0f`(zmZc<+dbrHXz0Bqk!PYM^7X; zHL-BFu5~~A$zOE;{;&R1d0+#Tj6MePk^x%kA-2N+^3%f~l^$5PZf!T2<*)13udOMb zJ$rY>vvM+ksqxQBme}poC8dV%dA-_XLE6;8o^%yp?>=wIowTQ5`>6!5{^H$tGsv6l zUVHV`?k|4yqj)a%#G7-p+OPGqv`E@4xrf#ILcl?F1BKMB`kMrEmog;xad_axct(El zi@(nRcS-lJ{`Tqf*y1<)WW>v0N`n!u zC4f;FrU}-Lt|{ffvvDJy5-5H9)=k~EtcRaYN1uwDlR^+-ZPPL!03cW&56$){(YDOB zc>xHNxl9)$$sNn+*_K_;VEk%hwB*4RDe~NW;>7@f3;6&B%4%^YP02A9ybDJ2_T>-{ z!o5tuDMx}#=~+IkFH-W1HT3Nrhf4W-Ei~|+sK0IFb$F|B@E%R3i5OeTj)IZDId${^ zTogB@3lz#vnK5rwf682$lqP!Y&)~9itulEq%)2tE_*L`{02Z2ErlfROb+V5G<=o^W zu|lag1Hlj(IYF(yOQTHXH{hgWx9J^5SQ>dy9NPR+fFSzpaLZ7-jB@4AaFwFSJxZmg zt9>63>4o|l<&k{M{oC$2s;O?H9|27%H6=sN>$S@B(RU;9L1xA4GCBZ9(lLA#8Ghlb z={~aVSO7BC6IZ$AAMccVI)8gmQ@|$l6fO0NeAB*>L;3Z6504(_ll00qP-rJdFGzp9 zG5}3gItuDyAjNhFFa@*$6nsw>burv{t8aTwZsZ`o$)B{Z`SDKiCwZcWr$haeWmi`#B&-18$-q+}1fWyzndp*=(Mg$g=2iIA0}vpZ zjsn0I`ks6ZP^$b45`gRCd`ABhnR59c@gAnB%hQoj{Hd|A8n_)hak9K1Jhj7_a&n%{ zYye5U8yeBU!>~KvkkNRb#E+bex8bFiU&@{xSqT$aJ9c`k+q!jo_x5WqW^a#Ep(CEe z=qPesmj)X2{rT})SWz-DF|WTnv-|d|FQi^)yL-uVK|q`peg!&uJ|vp~ z%rBWh^xTS_K;i@T^$LeosZySPm^Xc{H;=s_qcx(Sbf>rKP3aSLhNVQE1 z7;On45|!;#$s18NrcJJAF47bVp=PX1M?@lO_f)(1t%w4!x<-706(=>SNQYiGI$DN* zI#VU~!Vr|WHX}A8?-mT2&K@D-5g<_%OpW;B^0Z2Z5#F3#_H@`)G1y!4eM`J1rXFko zj~3|wrSs|d0#c`<@Bj)(n35rUw-R-)>hjlnBra@|Xhx4b~WoKb8urctB?pxP*%0id+esebt9efe0C!LVyy>7z9f$_j=m(lT)fxQNRGVksu5*)@%Tj{8m0Atn?NBjYNugf1b=%4 z%g{3R47GR;?td3K28ficehjCn7w_|8ptaSbF@iBL&0wmrHs8y0vclUbCc`^M?D7P* z@T)q%LoUhKw~fH2#*Xg(mWa1~Qc_l0JGO0Qgb{5BLlaXnO&M5 z-m7W6lp$YJl`E^;eyhDC-hXIvGz2KJVDpb~2s6FdLM8{Bide-~Cnw zEBCsS0VfNp`~eU_e+2qLbhS%6>8~df;CeY;YF@4_Tenudwr}5>XW5lHUg-x}agydK z{?^7|VKhl;Q#6%T{ga~Vs%QCjH?llG-i<|zmL{&{LigK`KFVfmzpXxN$?YqzywYvV z&ZzBhPCfSFTa^-8$ysP`5=y4?NqTM0OASPlm-pZORktb|PQLlZYmwP! z%Oj?ZpGn)Mhh}Q0^<=$YEouNz-&&n2!^$}61m%`?m-_KfD_7Z1uRk9LhW_QZpFgNk zMb%f(WrPBtsJM{C7Z^QU%LZh#8L^r=xszzp*$B3c#{_cPx5CoV;dHq5qd8eIqcEP2 z`&n8woEQ>Yx>Irt3q?y22#7Rw5V?s`iJLX`C5#jvqt30nHXf8!i30P4Aw`^Ej3X{?kaC<+! 
zP`99MUq?D@M&3jKdjbi@eu(DN!N@#FF-rao-%GU+8ZH8D4oC(ISQ9y zlYej28BzRaK$;R4pnIpl1|YXGfJskf47819)AD8z&}uZvD`N^nU%Qx*;)P3BtG=Eo zhhkN3Ki6&KPucZK+;_@=lAz2fYx(o!xG4R)G)O*{8G8XpWRRMoX-Z#rW-!sCeHbL( zBJ~5x02AM}yYIYN+OLgL19-A+wXLhA0n9>nt_BbQ8b7I>`cJ!tPKu|1N%-z>y^+7} zH;{sEFXpYt5}G7a9S{8He+H(EaQ)8vr6{#c151@}$|%5-^o@o_8^hgV2AT&29c13e z_W~aZ%!jdXdFDG1D6f9WVFMQNl!dnd?Txtn8DjZpQ#F(ZfAnA8r+&48I;)4Y`9I&= zG^KiJ7tesgKKN9e0?$bQ4w`z}fJp`f@BmEFZaRTHnZog087^@d+7oa4w)R)w;q7wb z>x~#{`2f<_GHTXm$soYO>&dvg044bt@d7(W;6N_W3mholZw3wB%0Qbb4~flC`g^PzcP42YIGl3Ro9f}jp%^W0R;l_1^6<` zf-J9OzY$)abMX|5OS67eJlc}zROu!X=xMr0dS|XwTvgg-@9tftOHQ6V(Y<_NPo`8R zvL*Xi%8q9+OK_jte<0qFGXWL}x{m>Ukb03B^(egsb(u~=pdU#Udhtl@j9{h zN-U`So60;$!KV}F zXF7Z9y6EhU>!ROyQR%A;5Yk2^bD@hY=x6BsqKoeTvP;RW^e;>C^!^{414BQ3eec0K zxGnB>3XLK&Qi^ocdi3$Q!!BIS1~TbvRoil8r}$pz^}D+O#ONOn;a38TIubgf!33`3LuGEzpzlr+f( z2u9;+8OaHgtA-O?o4@we0Ib#}I2Q<@FzdXWl!i{5w*ezS6r}{9Tn(MP0Svq|T3Vb# zGY||3WzX}$u<}At=%!a1fQi>4_tXQ>(>Zvbc#tStik$KRMn*D9;Ko$8P8mH^-x+{X z0LfqH)LuYRUQ4^R_>=O+q~6j}+URNqp&550Q@?djv@vV~ZRN}LPNAWNAyGb%VT7%n zY80G00&^9(n|B#=&(wz@FXJ43*WZ-iJPOU|18nlPf{J{iTe&VHXMB^7x&cov`IIV2 zTKRiVyEbE=_NA!Pc9b4XC4C!-o5IO|$*c0oKa`>Z@TTP}&qt#>2GrGiK+5IpM)IT- z3pAwu426rrt^8AwA~W{@edTC#bO9vgWnk=*i1Fk_YX-Fe;Q~8(2cH2DjG>^*+m`pF zt^D&BNa>9{0}rsR9UZBWvGk@2v{QRWU^cH$>h0hBVZ<5ydY*fDE^fpg4}dg2mMoXi zXv?JW8qazF%Cog72LAlaxx6p-!x>TzaLb{T1Ab$)e0aymk?-mW5R*~Us=bWz4(Ur3 za9aFJrijf4t?KA1x*`MiEiomjOfri;aSy<$zDdT=2wW|QteOh-TiF%t7@~d0NA%sz zV9f5xCu4Z6MmlvmD^vpfO}F^oyu8Z`TwDzZc^KtlWDj`QOiS#Cysjpw6W9irre~(B z-47ht-;Ixrb)yOBf91s&y8rZF-VF$`h5LoXv#sy8Z5YXEwUf2Fu=iB;vZ;;wcJz!a z=_gasyC+Vbta@(QyeV{MDR*>=!}EA2#9|e1BBn(C@lP=_|1^ncf3ccOJ0jt32Al%u z#R!sl-ksCofhl;?xa4}rj_uw4Y)ZB|UT+>~fYw$Cbk@U%ZDrAd04?35_A=3&p}hcA z+ONDK0qGY4@Ww*($jE57D|tQ)5V{e-#}k#xhVC{%kxx%=NdL_B6oh<+9sl4zhThz@ z;UFVP2^*(z6`qe#pNP;-=kJwxK^Q*w7y_{$7qa?=QfJV0B#KQZXB<|<&<{trmoKv# z=3&K& zMw~5>kmAR`-hko_LrYfVuT0ORr_2~VOH6rkdL;@xm@;F8>`!CkBJGrBD_Y5xwB$iP z{pcrE$MAYaIDk6kN!uGZ^k|0yExAvI&`?G%^!dN?Nd1)~J$e}*7e%W~^_GA0I^>zW z{Fmnxoxk37Z9>DSp7$6^bg9hFY;6Ym6BW;Ig zK!W~`*Xm?je4Dh9dBLZ(c~|NqPnWjRhVlo@7v*CaD^F<;K(u)QzWi0DkN$Zjv1&7! 
z>OFSyRQJi@Pa_juZN-jH=$ncMq?-+$*@-MLFQ;tiST9;Of6&e<X2 z#;Tfm^IH55J!OjT%-E^&2#EvIen8N!T|4Tm7%PTMxrieHn(*YA)YC4<mLn z4@@{q$Y1qOH@bR7w>ESdmG{O=9R#DAU#w}4fRKfu%V@Wbw@(>5 z4x|nIK=r13#9Wv=E+v+{DL{d{H&Z^Pg=Z(DIKIP(06MFY7#nTEb!i$(K6q`_54wRHJOw5w8o)_< zx`h&?gj?{fvv!<~({@I!_=BY5&ejgv3|Nylxv``Z{YIbKT^dHHx<- z{3|7w`Zj1J3n~JX>9wl1GcoDLFtznV#1*pcgGPq-WY3 zO$-TMYQqAGskgs9IB4{Q-hn|RbCz9-FXAQ9UP>v0iG+NB)&fb%Tf3uqeO642BrsfD zlh_Y!;X;#-au`_vP+iey{~yl`9!z04L{fn4*x1BeKkaEH2MoU7=#d|5a=+tVap@>TrEU%VAie>TCL=d)Av@qiGXkCQpKWp#LK zS__EM2VCMYu8K#?0KxR7{;Qqzb@3zey>;_S>bmg8e$$;iByFcw{Vu=bNLAnS;m?^f z-!%@KXJG3PK*|#y|Y`p&!1u^B{#!$rxQ<&Zb^dvlk2LL;RGfJu>>y z05CymGDaG+M%wWfm^b7h0A>UiS~ixKl1?Q1wakH@t1{MPB(^?Y4zj_REzJ}Q&j;lt zh*aRICByWiX#ky6nGBqzh7_wnCL>v144oN0j2$#>RSCmvnJQPYSdk{a3W*Y@xB&)A!vTEiML7Xe+?!2NE{Gd-wpx-Al@A4EBo)n= zAl_`E7T)q83GfX`@e(cVHYRwtm61IWlBV4HVajrzmncJkh!K+BR1-ytN7YWLi|zBd zGR-f8VMa5ih=3!&gA%5=@r^N2*}n5V?ygoJ%9^63%$3QQQ1}c#qoI62uPOZ)%cOHL zn%aXh&}Q=QJzJF~c?iH$JTQnBa#LldE&!4CZe=z1dV#~_sZDz$2?V;&Krt50-~bDy zX6hfxwG>wr?IUCS2!JSq4A*EYbzvy{)`s%dZv9AHJqwr&BJ|K)0dV1=`l?%st~LV< z^brg$Hv3wV<@W%b6rrIv<+Vg`yMENBxGa72T7lgG_$Yew9{SYRN86ZsGc6%608!g{ z#&{NALRgDQX(LJRNN@UuJX^gXD6`SbtE3HW0A{i@^-Del;_?e{Aj^yg+H5Mfb<5`N z`0Ct2HU}TCq^Z_Dc+3Q1E%af;PW-{PHclt}}fCd8= z(-NjayrXZMp2Bydc^*u(y!GnK-QWNG=lT0W(%tJmId&?P#v7Xa-hSh??)cFo88BwL zBO7>;AD_psUWVHJ=7n|dLG<~#oL|FBv}gD3^2)rJX&;BoefsIAB?q6$C%Twy^q+b* zD6mBPv1fpYkF*HFw_v0U;sjbZV6I=2C-E?*ZH^v2T7#S&J9c!>J-4^pwtYu<6uS|S zdN2H0VA+0nM$qt)Zb%l9xs(!_Ih)|$X99Y@`}P}=mDSy`BgZOT5#SwdS7{L?j~>YQ z{n=|!t&-|y*lrehaxmu5ku(HSGyhCx0IrhmeQR~oJx64^1IP(*`RYLEYY*FrL8KMABDd# zWgI$wsylRIth_X0Iqb}5R9Qwc3gmv=ij*XQC(K;Zu}$?Ch~WMf?=g0Ex>n+t23nO7 z%5Zoloq2YnW%=3aSlx6az)GO!g3JStO?o%0gyLDL?d=0n1O=~)!MKzTD1i4;@&!Hw z(4vI_1m1k7NX?z&mCceUFx?Ap=blBuIRRN7Iv{0DLDv$_)VBqMwo`#EeH3fM>*1n6 z2Q=`brV7#^{_~xJQx3%nT$DkIq4k?m$4B>?TRo%qv|CL-qSbvLmwHJ9)Fcj=H*POd98hvS6L**;L3w~<{kOw>J5Z)@Pq-CcV9ng(E9iV zgw^y*_zB#p1EbnJLxXyzZ1?-THaH%AWTXtUSSi8CEqwzZzN?S6DZ`sOk=r#{(g}pv z)swsc0sAsV0O&I!0Dc)XyibLBtyCDW_8f1-8IaQz2{5+;=2k}aO9PCTW`zhk7+*!N zT(!EUdI9&q#?aq@9z$hsB6R_5#F7CB%=0W{+YZ;b(jQ+D_7+tcmZv86lw$tSf_>EHe0 zSK&iE!~t167~jceULSpOB-2Pa`8zcA(tpYn>QA4uk-4=6d7(SeN*YsOY)QYXc)4 zkC$bQ0L{?U8(o{Kafwpk(DjM2iEJP=Q!dRK*~Dn_KG@r&3~cJbz;4JUKO5q{<_e}L zfroM#r(}k6Q{0Gx;wUnEcHo)wb_Bhy%E)a++}MmMkHy9SHAd9F9E=(TZn-adfG9lY zCD0k2g2#Hw92eI&`U@ z(KBOD=4u&g(zqxWbu}s{CwNN+Zq!*2Z8^1c=qe){nv|;z{bdNXPfwdEurl}qnv_{a zlM+gL-#zlaveE8)|Bn1TuRQZbxqbblcPXpCd~d(=Qr@jgo-!F+RYv{xT&sx*$VfPC zXF5D6d5N3AvWO>YcVRdR8Bq^)&FZ=!1CATO|lmJ5k*B@~G02xw_W7 zxx@GZbYxS|qJ1S8+}@C`&mJNS0#6G0aN!|Z7)}@Cf#}xX5_gr+JU-eu%qo~<-mOVM zp%rKVzGq9)zXYt}hrWhHm(+#ai*Mi! 
zso1mxl0F;n z(X0i2-hAWr?(m_*-T4e=gkcaJZL5sOm=)k4h++bGJxqOw_gI@a48IxK<6~!Q zY45X90;X}i!wGyTu_J&9g-Kyhf;MGJzb+xOTyrMt+HD7K2UL4z$j^wzJC@g(3R;u0 zOi3`<3~o(3O$Yo?{-$oY z*#TE+85F!jgL}!Y3|kBOY~?o|J&KbU3gDBT5vELIDT=Oo8L`QvJR9Mi`Y5}=L)DE( z$Y@Hk213-45?4;;k#yRse#%5XKqz=I&G1oEX{ z@nR{v_?|Rq1;$*B9|^3EtTK|O7Of8R9eD7(ztD*;_rwhNNZ-eP02p|VBxRH_M)yGe z7-PT(*x@}fN+)CF**!aYmiI4UA~mY=fsgWvgpTt_+B*bs<68zVTGcnUk7&cp*^-=sEGWgj3@y7&s@ zg&&d6n|IrQq6~TD6S&b14q3yWIzc!3Y0fIETK~zHnVHG7VEvg?{@G0;=OLj>Xzss7kO*4RzE-Yk0R1LieHPDEUy!J|K@N1w)_6~ zzEcOmz4wb>b}z;71508!O13qQK9HBa|Kvr)lj4gn4+anY(Hr_t|AIUPwTcPB`xd-= zJzkl^@q}DT5bN3kB-+(9jSRIbS@xgPmgi^H(W30I>X1Nfap1u7r3VdC_08JuJ#_;A z>BIsjS(08c`rh0TXuj+ z%!GIZM%)R$=2$Pqs5%+gGdml4cI@PtEK|<;GXWs>0|2^AlK^7C1+YRf>j<_w=j}ja zFV<@_+5v`mi+~k-t6WRaFD22(7@(1B*qk|VV0Dn?xQx2Uat4VZsF8g-k{FeySo^6G z%XnLChR%ZqtAHq7Q!T(1Wx|jM2t=!kVlU%aMzXDYRALN16^oG z3&5!i-X(5+eaT7--Vd_E@ZqC;yyLGl406$#XY#Q$-)3i&K9E+1JN(`h0HQr-W-kog>1o_4$QwOg3p@A2qfdyVcQ-%Tl@rWd|k{2=q zoZ}0i&dZ|yrVRlVU<9qj3-tg7lLy+#ll$t52kKZJC+YAdax5Jl0kg%+qzhA;S26d* zI~_kY7Gr#|n@;^LljY4F%Z{WD1GH2W7}9n(u2~J&Rvqa-wktQvXDmZVfhzT}Pm1^p zG6T$~F~9IKf*%_j@7{?wgd~c)dyx7aKa-U*8QIUolk#TbM#d(_YjZS4cpy?$?=Q^s zGdJVG%zh*Ipl`kZ{;#_?UVE)pPaQsVsN1!3XWDFW@tVA`5vFp!=*pww`xiZ{2ameP z7bbc9W}3HTf?O?_ZwvSAKzr&$=(rWytmxSg-nOabCNug%I27JvC7GTWI~ChS>>{R{hFFia2lhy0ZNyVq(lUt znm^sKVYJ(}X?=MIczLWkd6-d2EjtcfSE9_WM7XVHX&VzgExzcrM6W$0HrETOufl~xJcNwG5X#^{;-Y8PW z<MKD z<{fFgGo>P3(U2Me^#CT|st*IkE`6c#vw0eP*UrF4FGt$ahYoNajXuPMU7myx=a>ak zGghV9*8$ihXZw4knn3&ExXLrN6J#p_3Yz-C#FQng{eS&XK5|Hwj3FI z)GhqTzYTmX9e@bHqQx8m)#Szt{_?Jnz1F_1j&&Mr%90Tfq+I>5KxXQXUO~yg2+xoa zcLS0Eeeo>Vl&m+n1}!|NAAa;ncP7&?HC-IqPmPTSfaKVU46J~?EnBywpC(8+<=qO6 zR{Y$_yAJlmtGnqV8RrLdi5COZOpE5^;(QF6YU(m|QjbTUcmpD9ce6sq)X293L~aJ; zEX@hFC&nhLjV!NT7@GgtPk-F~=fBNfB9Vhf9=={8Q}mZycuiLtB$0PAcJ%1c?u7%- zWh!qZQzF0ZHsu80S6+TO`@1;P%YZ8bs+8lD+Q+YCGuHx^1I3T+-rcAK`vmm zV8P;mkxYGMYKlH1lcv2a`^IbS>?&QCzv`yn7&LC&urUGETgv;^RwSiwBRGvyZ{?qQ zKe_;nDZa?;>Gfyfz|ePJ*m+Qc2!b@~67WP(n(NhBNsr&QY8N~TtTaWPy&^JVA9Zd@ zJPnklCS}|$5fvzAghnwkMihcMMgV7-6(A`K0D)F(l|%%IF{g_J0^ZKH@O^L@FGn3W z9!05>0wz2pcItzSE!VBi8w$!h5yjy5^2Cp9*|@e8G==PxMSw#5i>1g68O7tTX$l9z ziRG|Y2v0|WsywS@$Pv#~@xYi?vGG~kUxZ;yj7a!oHI92mI^syw&j<-`2{`a$q)Fi^ zM+dRmhv&%q>O^U~@Y2WWo z<^b>kI}DA{8-u8wc{X@a&`Oc(h~@dnGXRRA(qjP?$`<%B(v(ldZ=}u8$S5e&oH%1o zH{gl7@n$KrH-NpuC@{xBBwY`1N-5-BK!ag*F&d`Hcu|aQ$(NNS-eF|C-}sPGW1dm? 
[GIT binary patch data truncated: remainder of the 216828-byte PNG payload for examples/omni-vlm/cat.png omitted]
zPshtkqQ9w=rLG8#P1S(lnnJNMhb~NcT+BLw^f?4aTYK8C{`)kZ z4)sD*-75%qer{Wq7^l4u4cE%o^xSxhJeclDpAW-Rf{b^HF@(p8DdjdEBxHv!=J`V* z)hXY7`-gA8J@&(&|6I?h_DQqVMr6#AW7jwHsRSfnwd=bFv$BYYX`G=I>7!`}SC70S zGr8AG-|xM-GLtwjdBvJkzXb75J@?Y?bZk!&*Y$?#wY+&Qo^#UUCEKxSVAH_I)4=|J z|N657@fqBxYxLr#WB`B>W9Q~KnsJMFw39p4?+*90kyeuQ1rWT>#vq$!5aYps)(F@r zAUFN#Y;i8m!l(rSIT3G(G7tsR8AfH7viT4oVTi3@0glLwykCceIBcxOd;k}t^%9(b z0uZjDjj>68wrRcyYzGbVC2PEiBU4}bq%mG z1;#VbC6YPEd!ih~1L+WqschtsRskBXgzr2X>OqJ4ns(9efVbe_lDU+f3@U?uDsO3} zQD48wr+Zq2bRi+GlYf^BC%t5oUVBF# zpVO5}Ne{&2JK#C}0AWMuv?aGc-6Qxz&W8;8P2Z(&(`eG@GdvzVM#|Jrc&Xa|Q;v4h z&!e_}reE!i=#)q%4{hgKdeBD@jMe6DXaqwni8l1oUW>@_ytM9VE5FeTnRtj0C4Ehq zhxhMvn|Ght2)#blX}RlrzV%3w0dNA3{a+kSmNU7m$E)0f$I}+aQ%{6}p7aa9zx$H7 z<@Kt!KbFwE@t(~6M^DdF&w49 zeF3`xgjwyv$mf4wtP5j?jMVt%ngRlYoJ2M`I~j02i4?>j0;+m2a-Z`Kc%(Q(on!^x z*xeXi`HvgjUK-Ey81Qj_&+|QuIOWA4GI2(oW&NYaby3sqZZ=FL(l%o$!48yb~8P(Hy?WR9yo3m*Y3aMY2$e3^4@rm{3iU9 z7@n>x24t*7PUhIDZocI2A|Y>1f-2eap literal 0 HcmV?d00001 diff --git a/examples/omni-vlm/clip.cpp b/examples/omni-vlm/clip.cpp new file mode 100644 index 000000000..45764f9f3 --- /dev/null +++ b/examples/omni-vlm/clip.cpp @@ -0,0 +1,2461 @@ +// NOTE: This is modified from clip.cpp only for LLaVA, +// so there might be still unnecessary artifacts hanging around +// I'll gradually clean and extend it +// Note: Even when using identical normalized image inputs (see normalize_image_u8_to_f32()) we have a significant difference in resulting embeddings compared to pytorch +#include "clip.h" +#include "ggml.h" +#include "ggml-alloc.h" +#include "ggml-backend.h" + +#ifdef GGML_USE_CUDA +#include "ggml-cuda.h" +#endif + +#ifdef GGML_USE_METAL +#include "ggml-metal.h" +#endif + +#ifdef GGML_USE_CANN +#include "ggml-cann.h" +#endif + +#ifdef GGML_USE_VULKAN +#include "ggml-vulkan.h" +#endif + +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// #include +// #include +// #include + +// using std::cout; +// using std::endl; + +#define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0) +#define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0) +#define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0) +#define LOG_DBG(...) do { fprintf(stderr, __VA_ARGS__); } while (0) + +//#define CLIP_DEBUG_FUNCTIONS + +// RGB uint8 image +struct clip_image_u8 { + int nx; + int ny; + + std::vector buf; +}; + +// RGB float32 image (NHWC) +// Memory layout: RGBRGBRGB... +struct clip_image_f32 { + int nx; + int ny; + + std::vector buf; +}; + +static std::string format(const char * fmt, ...) 
+{
+    va_list ap;
+    va_list ap2;
+    va_start(ap, fmt);
+    va_copy(ap2, ap);
+    int size = vsnprintf(NULL, 0, fmt, ap);
+    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
+    std::vector<char> buf(size + 1);
+    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
+    GGML_ASSERT(size2 == size);
+    va_end(ap2);
+    va_end(ap);
+    return std::string(buf.data(), buf.size());
+}
+
+//
+// key constants
+//
+
+#define KEY_FTYPE "general.file_type"
+#define KEY_NAME "general.name"
+#define KEY_DESCRIPTION "general.description"
+// #define KEY_HAS_TEXT_ENC "clip.has_text_encoder"
+// #define KEY_HAS_VIS_ENC "clip.has_vision_encoder"
+// #define KEY_HAS_LLAVA_PROJ "clip.has_llava_projector"
+// #define KEY_HAS_MINICPMV_PROJ "clip.has_minicpmv_projector"
+// #define KEY_MINICPMV_VERSION "clip.minicpmv_version"
+#define KEY_USE_GELU "siglip.use_gelu"
+#define KEY_N_EMBD "siglip.%s.embedding_length"
+#define KEY_N_FF "siglip.%s.feed_forward_length"
+#define KEY_N_BLOCK "siglip.%s.block_count"
+#define KEY_N_HEAD "siglip.%s.attention.head_count"
+#define KEY_LAYER_NORM_EPS "siglip.%s.attention.layer_norm_epsilon"
+#define KEY_PROJ_DIM "siglip.%s.projection_dim"
+#define KEY_TOKENS "tokenizer.ggml.tokens"
+#define KEY_N_POSITIONS "clip.text.context_length"
+#define KEY_IMAGE_SIZE "siglip.vision.image_size"
+#define KEY_PATCH_SIZE "siglip.vision.patch_size"
+#define KEY_IMAGE_MEAN "siglip.vision.image_mean"
+#define KEY_IMAGE_STD "siglip.vision.image_std"
+#define KEY_PROJ_TYPE "siglip.projector_type"
+
+#define KEY_MM_PATCH_MERGE_TYPE "siglip.vision.mm_patch_merge_type"
+#define KEY_IMAGE_GRID_PINPOINTS "siglip.vision.image_grid_pinpoints"
+#define KEY_IMAGE_CROP_RESOLUTION "siglip.vision.image_crop_resolution"
+
+
+//
+// tensor name constants
+//
+
+#define TN_TOKEN_EMBD "%s.token_embd.weight"
+#define TN_POS_EMBD "%s.position_embd.weight"
+#define TN_CLASS_EMBD "v.class_embd"
+#define TN_PATCH_EMBD "v.patch_embd.weight"
+#define TN_PATCH_BIAS "v.patch_embd.bias"
+#define TN_ATTN_K "%s.blk.%d.attn_k.%s"
+#define TN_ATTN_Q "%s.blk.%d.attn_q.%s"
+#define TN_ATTN_V "%s.blk.%d.attn_v.%s"
+#define TN_ATTN_OUTPUT "%s.blk.%d.attn_out.%s"
+#define TN_FFN_DOWN "%s.blk.%d.ffn_down.%s"
+#define TN_FFN_UP "%s.blk.%d.ffn_up.%s"
+#define TN_LN_1 "%s.blk.%d.ln1.%s"
+#define TN_LN_2 "%s.blk.%d.ln2.%s"
+#define TN_LN_PRE "%s.pre_ln.%s"
+#define TN_LN_POST "%s.post_ln.%s"
+#define TN_TEXT_PROJ "text_projection.weight"
+#define TN_VIS_PROJ "visual_projection.weight"
+#define TN_LLAVA_PROJ "mm.%d.%s"
+#define TN_MVLM_PROJ_MLP "mm.model.mlp.%d.%s"
+#define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
+#define TN_MVLM_PROJ_PEG "mm.model.peg.%d.%s"
+#define TN_IMAGE_NEWLINE "model.image_newline"
+
+#define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"
+#define TN_MINICPMV_QUERY "resampler.query"
+#define TN_MINICPMV_PROJ "resampler.proj.weight"
+#define TN_MINICPMV_KV_PROJ "resampler.kv.weight"
+#define TN_MINICPMV_ATTN "resampler.attn.%s.%s"
+#define TN_MINICPMV_LN "resampler.ln_%s.%s"
+
+
+enum projector_type {
+    PROJECTOR_TYPE_MLP,
+    PROJECTOR_TYPE_MLP_NORM,
+    PROJECTOR_TYPE_LDP,
+    PROJECTOR_TYPE_LDPV2,
+    PROJECTOR_TYPE_RESAMPLER,
+    PROJECTOR_TYPE_UNKNOWN,
+};
+
+static std::map<projector_type, std::string> PROJECTOR_TYPE_NAMES = {
+    { PROJECTOR_TYPE_MLP, "mlp" },
+    { PROJECTOR_TYPE_LDP, "ldp" },
+    { PROJECTOR_TYPE_LDPV2, "ldpv2"},
+    { PROJECTOR_TYPE_RESAMPLER, "resampler"},
+};
+
+
+//
+// utilities to get data from a gguf file
+//
+
+static int get_key_idx(const gguf_context * ctx, const char * key) {
+    int i = gguf_find_key(ctx, key);
+    if (i == -1) {
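+        // missing keys are fatal for this helper; callers that can tolerate an
+        // absent key probe with gguf_find_key() and check for -1 themselves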
+ LOG_ERR("key %s not found in file\n", key); + throw std::runtime_error(format("Missing required key: %s", key)); + } + + return i; +} + +static uint32_t get_u32(const gguf_context * ctx, const std::string & key) { + const int i = get_key_idx(ctx, key.c_str()); + + return gguf_get_val_u32(ctx, i); +} + +static float get_f32(const gguf_context * ctx, const std::string & key) { + const int i = get_key_idx(ctx, key.c_str()); + + return gguf_get_val_f32(ctx, i); +} + +static struct ggml_tensor * get_tensor(struct ggml_context * ctx, const std::string & name) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, name.c_str()); + if (!cur) { + throw std::runtime_error(format("%s: unable to find tensor %s\n", __func__, name.c_str())); + } + + return cur; +} + +static std::string get_ftype(int ftype) { + return ggml_type_name(static_cast(ftype)); +} + +static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) { + switch (type) { + case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]); + case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]); + case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]); + case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]); + case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]); + case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]); + case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]); + case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]); + case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]); + case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]); + case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false"; + default: return format("unknown type %d", type); + } +} + +static void replace_all(std::string & s, const std::string & search, const std::string & replace) { + if (search.empty()) { + return; + } + std::string builder; + builder.reserve(s.length()); + size_t pos = 0; + size_t last_pos = 0; + while ((pos = s.find(search, last_pos)) != std::string::npos) { + builder.append(s, last_pos, pos - last_pos); + builder.append(replace); + last_pos = pos + search.length(); + } + builder.append(s, last_pos, std::string::npos); + s = std::move(builder); +} + +static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) { + const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); + + switch (type) { + case GGUF_TYPE_STRING: + return gguf_get_val_str(ctx_gguf, i); + case GGUF_TYPE_ARRAY: + { + const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i); + int arr_n = gguf_get_arr_n(ctx_gguf, i); + const void * data = gguf_get_arr_data(ctx_gguf, i); + std::stringstream ss; + ss << "["; + for (int j = 0; j < arr_n; j++) { + if (arr_type == GGUF_TYPE_STRING) { + std::string val = gguf_get_arr_str(ctx_gguf, i, j); + // escape quotes + replace_all(val, "\\", "\\\\"); + replace_all(val, "\"", "\\\""); + ss << '"' << val << '"'; + } else if (arr_type == GGUF_TYPE_ARRAY) { + ss << "???"; + } else { + ss << gguf_data_to_str(arr_type, data, j); + } + if (j < arr_n - 1) { + ss << ", "; + } + } + ss << "]"; + return ss.str(); + } + default: + return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0); + } +} + +static void print_tensor_info(const ggml_tensor * tensor, const char * prefix = "") { + size_t tensor_size = ggml_nbytes(tensor); + LOG_INF("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" 
PRId64 ", %" PRId64 "], type = %s\n", + prefix, ggml_n_dims(tensor), tensor->name, tensor_size, + tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], ggml_type_name(tensor->type)); +} + +static projector_type clip_projector_type_from_string(const std::string & name) { + for (const auto & kv : PROJECTOR_TYPE_NAMES) { // NOLINT + if (kv.second == name) { + return kv.first; + } + } + return PROJECTOR_TYPE_UNKNOWN; +} + +#ifdef CLIP_DEBUG_FUNCTIONS +static void clip_image_write_image_to_ppm(const clip_image_u8& img, const std::string& filename) { + std::ofstream file(filename, std::ios::binary); + if (!file.is_open()) { + LOG_ERR("Failed to open file for writing: %s\n", filename.c_str()); + return; + } + + // PPM header: P6 format, width, height, and max color value + file << "P6\n" << img.nx << " " << img.ny << "\n255\n"; + + // Write pixel data + for (size_t i = 0; i < img.buf.size(); i += 3) { + // PPM expects binary data in RGB format, which matches our image buffer + file.write(reinterpret_cast(&img.buf[i]), 3); + } + + file.close(); +} + +static void clip_image_save_to_bmp(const clip_image_u8& img, const std::string& filename) { + std::ofstream file(filename, std::ios::binary); + if (!file.is_open()) { + LOG_ERR("Failed to open file for writing: %s\n", filename.c_str()); + return; + } + + int fileSize = 54 + 3 * img.nx * img.ny; // File header + info header + pixel data + int bytesPerPixel = 3; + int widthInBytes = img.nx * bytesPerPixel; + int paddingAmount = (4 - (widthInBytes % 4)) % 4; + int stride = widthInBytes + paddingAmount; + + // Bitmap file header + unsigned char fileHeader[14] = { + 'B','M', // Signature + 0,0,0,0, // Image file size in bytes + 0,0,0,0, // Reserved + 54,0,0,0 // Start of pixel array + }; + + // Total file size + fileSize = 54 + (stride * img.ny); + fileHeader[2] = (unsigned char)(fileSize); + fileHeader[3] = (unsigned char)(fileSize >> 8); + fileHeader[4] = (unsigned char)(fileSize >> 16); + fileHeader[5] = (unsigned char)(fileSize >> 24); + + // Bitmap information header (BITMAPINFOHEADER) + unsigned char infoHeader[40] = { + 40,0,0,0, // Size of this header (40 bytes) + 0,0,0,0, // Image width + 0,0,0,0, // Image height + 1,0, // Number of color planes + 24,0, // Bits per pixel + 0,0,0,0, // No compression + 0,0,0,0, // Image size (can be 0 for no compression) + 0,0,0,0, // X pixels per meter (not specified) + 0,0,0,0, // Y pixels per meter (not specified) + 0,0,0,0, // Total colors (color table not used) + 0,0,0,0 // Important colors (all are important) + }; + + // Width and height in the information header + infoHeader[4] = (unsigned char)(img.nx); + infoHeader[5] = (unsigned char)(img.nx >> 8); + infoHeader[6] = (unsigned char)(img.nx >> 16); + infoHeader[7] = (unsigned char)(img.nx >> 24); + infoHeader[8] = (unsigned char)(img.ny); + infoHeader[9] = (unsigned char)(img.ny >> 8); + infoHeader[10] = (unsigned char)(img.ny >> 16); + infoHeader[11] = (unsigned char)(img.ny >> 24); + + // Write file headers + file.write(reinterpret_cast(fileHeader), sizeof(fileHeader)); + file.write(reinterpret_cast(infoHeader), sizeof(infoHeader)); + + // Pixel data + std::vector padding(3, 0); // Max padding size to be added to each row + for (int y = img.ny - 1; y >= 0; --y) { // BMP files are stored bottom-to-top + for (int x = 0; x < img.nx; ++x) { + // Each pixel + size_t pixelIndex = (y * img.nx + x) * 3; + unsigned char pixel[3] = { + img.buf[pixelIndex + 2], // BMP stores pixels in BGR format + img.buf[pixelIndex + 1], + img.buf[pixelIndex] + }; + 
file.write(reinterpret_cast(pixel), 3); + } + // Write padding for the row + file.write(reinterpret_cast(padding.data()), paddingAmount); + } + + file.close(); +} + +// debug function to convert f32 to u8 +static void clip_image_convert_f32_to_u8(const clip_image_f32& src, clip_image_u8& dst) { + dst.nx = src.nx; + dst.ny = src.ny; + dst.buf.resize(3 * src.nx * src.ny); + for (size_t i = 0; i < src.buf.size(); ++i) { + dst.buf[i] = static_cast(std::min(std::max(int(src.buf[i] * 255.0f), 0), 255)); + } +} +#endif + + +// +// clip layers +// + +struct clip_hparams { + int32_t image_size; + int32_t patch_size; + int32_t hidden_size; + int32_t n_intermediate; + int32_t projection_dim; + int32_t n_head; + int32_t n_layer; + + float eps; + + char mm_patch_merge_type[32] = "flat"; // spatial_unpad or flat (default) + + int32_t image_grid_pinpoints[32]; + int32_t image_crop_resolution; +}; + +struct clip_layer { + // attention + struct ggml_tensor * k_w; + struct ggml_tensor * k_b; + struct ggml_tensor * q_w; + struct ggml_tensor * q_b; + struct ggml_tensor * v_w; + struct ggml_tensor * v_b; + + struct ggml_tensor * o_w; + struct ggml_tensor * o_b; + + // layernorm 1 + struct ggml_tensor * ln_1_w; + struct ggml_tensor * ln_1_b; + + // ff + struct ggml_tensor * ff_i_w; + struct ggml_tensor * ff_i_b; + + struct ggml_tensor * ff_o_w; + struct ggml_tensor * ff_o_b; + + // layernorm 2 + struct ggml_tensor * ln_2_w; + struct ggml_tensor * ln_2_b; +}; + +struct clip_vision_model { + struct clip_hparams hparams; + + // embeddings + struct ggml_tensor * class_embedding; + struct ggml_tensor * patch_embeddings; + struct ggml_tensor * patch_bias; + struct ggml_tensor * position_embeddings; + + struct ggml_tensor * pre_ln_w; + struct ggml_tensor * pre_ln_b; + + std::vector layers; + + struct ggml_tensor * post_ln_w; + struct ggml_tensor * post_ln_b; + + struct ggml_tensor * projection; + + // LLaVA projection + struct ggml_tensor * mm_0_w = NULL; + struct ggml_tensor * mm_0_b = NULL; + struct ggml_tensor * mm_2_w = NULL; + struct ggml_tensor * mm_2_b = NULL; + + struct ggml_tensor * image_newline = NULL; + + // Yi type models with mlp+normalization projection + struct ggml_tensor * mm_1_w = NULL; // Yi type models have 0, 1, 3, 4 + struct ggml_tensor * mm_1_b = NULL; + struct ggml_tensor * mm_3_w = NULL; + struct ggml_tensor * mm_3_b = NULL; + struct ggml_tensor * mm_4_w = NULL; + struct ggml_tensor * mm_4_b = NULL; + + // MobileVLM projection + struct ggml_tensor * mm_model_mlp_1_w; + struct ggml_tensor * mm_model_mlp_1_b; + struct ggml_tensor * mm_model_mlp_3_w; + struct ggml_tensor * mm_model_mlp_3_b; + struct ggml_tensor * mm_model_block_1_block_0_0_w; + struct ggml_tensor * mm_model_block_1_block_0_1_w; + struct ggml_tensor * mm_model_block_1_block_0_1_b; + struct ggml_tensor * mm_model_block_1_block_1_fc1_w; + struct ggml_tensor * mm_model_block_1_block_1_fc1_b; + struct ggml_tensor * mm_model_block_1_block_1_fc2_w; + struct ggml_tensor * mm_model_block_1_block_1_fc2_b; + struct ggml_tensor * mm_model_block_1_block_2_0_w; + struct ggml_tensor * mm_model_block_1_block_2_1_w; + struct ggml_tensor * mm_model_block_1_block_2_1_b; + struct ggml_tensor * mm_model_block_2_block_0_0_w; + struct ggml_tensor * mm_model_block_2_block_0_1_w; + struct ggml_tensor * mm_model_block_2_block_0_1_b; + struct ggml_tensor * mm_model_block_2_block_1_fc1_w; + struct ggml_tensor * mm_model_block_2_block_1_fc1_b; + struct ggml_tensor * mm_model_block_2_block_1_fc2_w; + struct ggml_tensor * mm_model_block_2_block_1_fc2_b; + 
+    struct ggml_tensor * mm_model_block_2_block_2_0_w;
+    struct ggml_tensor * mm_model_block_2_block_2_1_w;
+    struct ggml_tensor * mm_model_block_2_block_2_1_b;
+
+    // MobileVLM_V2 projection
+    struct ggml_tensor * mm_model_mlp_0_w;
+    struct ggml_tensor * mm_model_mlp_0_b;
+    struct ggml_tensor * mm_model_mlp_2_w;
+    struct ggml_tensor * mm_model_mlp_2_b;
+    struct ggml_tensor * mm_model_peg_0_w;
+    struct ggml_tensor * mm_model_peg_0_b;
+
+    // omni-vlm projection
+    struct ggml_tensor * mm_head_attn_in_w; // shape is [hidden_dim, 3*hidden_dim]
+    struct ggml_tensor * mm_head_attn_in_b;
+    struct ggml_tensor * mm_head_attn_out_w;
+    struct ggml_tensor * mm_head_attn_out_b;
+    struct ggml_tensor * mm_head_ln_w;
+    struct ggml_tensor * mm_head_ln_b;
+    struct ggml_tensor * mm_head_ffd_w;
+    struct ggml_tensor * mm_head_ffd_b;
+    struct ggml_tensor * mm_head_ffu_w;
+    struct ggml_tensor * mm_head_ffu_b;
+    struct ggml_tensor * mm_head_prob;
+
+    // MINICPMV projection
+    struct ggml_tensor * mm_model_pos_embed_k;
+    struct ggml_tensor * mm_model_query;
+    struct ggml_tensor * mm_model_proj;
+    struct ggml_tensor * mm_model_kv_proj;
+    struct ggml_tensor * mm_model_attn_q_w;
+    struct ggml_tensor * mm_model_attn_q_b;
+    struct ggml_tensor * mm_model_attn_k_w;
+    struct ggml_tensor * mm_model_attn_k_b;
+    struct ggml_tensor * mm_model_attn_v_w;
+    struct ggml_tensor * mm_model_attn_v_b;
+    struct ggml_tensor * mm_model_attn_o_w;
+    struct ggml_tensor * mm_model_attn_o_b;
+    struct ggml_tensor * mm_model_ln_q_w;
+    struct ggml_tensor * mm_model_ln_q_b;
+    struct ggml_tensor * mm_model_ln_kv_w;
+    struct ggml_tensor * mm_model_ln_kv_b;
+    struct ggml_tensor * mm_model_ln_post_w;
+    struct ggml_tensor * mm_model_ln_post_b;
+};
+
+struct clip_ctx {
+    bool has_text_encoder = false;
+    bool has_vision_encoder = false;
+    bool has_llava_projector = false;
+    bool has_minicpmv_projector = false;
+    int minicpmv_version = 2;
+
+    struct clip_vision_model vision_model;
+    projector_type proj_type = PROJECTOR_TYPE_MLP;
+
+    float image_mean[3];
+    float image_std[3];
+    bool use_gelu = false;
+    int32_t ftype = 1;
+
+    bool has_class_embedding = true;
+    bool has_pre_norm = true;
+    bool has_post_norm = false;
+    bool has_patch_bias = false;
+
+    struct gguf_context * ctx_gguf;
+    struct ggml_context * ctx_data;
+
+    std::vector<uint8_t> buf_compute_meta;
+
+    // memory buffers to evaluate the model
+    ggml_backend_buffer_t params_buffer = NULL;
+
+    ggml_backend_t backend = NULL;
+    ggml_gallocr_t compute_alloc = NULL;
+
+    struct clip_image_size * load_image_size;
+};
+
+static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32_batch * imgs, struct clip_image_size * load_image_size, bool is_inf = false) {
+    if (!ctx->has_vision_encoder) {
+        LOG_ERR("This gguf file seems to have no vision encoder\n");
+        return nullptr;
+    }
+
+    const auto & model = ctx->vision_model;
+    const auto & hparams = model.hparams;
+
+    const int image_size = hparams.image_size;
+    int image_size_width  = image_size;
+    int image_size_height = image_size;
+    if (ctx->has_minicpmv_projector) {
+        if (load_image_size == nullptr) {
+            load_image_size = clip_image_size_init();
+        }
+        LOG_DBG("%s: %d %d\n", __func__, load_image_size->width, load_image_size->height);
+        image_size_width  = load_image_size->width;
+        image_size_height = load_image_size->height;
+        if (is_inf) {
+            image_size_width  = imgs->data->nx;
+            image_size_height = imgs->data->ny;
+        }
+    }
+    const int patch_size = hparams.patch_size;
+    const int num_patches = ((image_size_width / patch_size) * (image_size_height /
patch_size)); + const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0); + const int hidden_size = hparams.hidden_size; + const int n_head = hparams.n_head; + const int d_head = hidden_size / n_head; + int n_layer = hparams.n_layer; + const float eps = hparams.eps; + + const int batch_size = imgs->size; + + if (ctx->has_llava_projector || ctx->has_minicpmv_projector) { + GGML_ASSERT(batch_size == 1); + } + + struct ggml_init_params params = { + /*.mem_size =*/ ctx->buf_compute_meta.size(), + /*.mem_buffer =*/ ctx->buf_compute_meta.data(), + /*.no_alloc =*/ true, + }; + + struct ggml_context * ctx0 = ggml_init(params); + struct ggml_cgraph * gf = ggml_new_graph(ctx0); + + struct ggml_tensor * inp_raw = ggml_new_tensor_4d(ctx0, GGML_TYPE_F32, image_size_width, image_size_height, 3, batch_size); + ggml_set_name(inp_raw, "inp_raw"); + ggml_set_input(inp_raw); + + struct ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings, inp_raw, patch_size, patch_size, 0, 0, 1, 1); + + inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size); + inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3)); + + if (ctx->has_patch_bias) { + // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp)); + inp = ggml_add(ctx0, inp, model.patch_bias); + } + struct ggml_tensor * embeddings = inp; + struct ggml_tensor * pos_embed = nullptr; + + if (ctx->has_llava_projector) { + // concat class_embeddings and patch_embeddings + if (ctx->has_class_embedding) { + embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size); + ggml_set_name(embeddings, "embeddings"); + ggml_set_input(embeddings); + embeddings = ggml_acc(ctx0, embeddings, model.class_embedding, + embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0); + embeddings = ggml_acc(ctx0, embeddings, inp, + embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]); + } + } + + struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions); + ggml_set_name(positions, "positions"); + ggml_set_input(positions); + + embeddings = + ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions)); + + if (ctx->has_minicpmv_projector) { + int pos_w = image_size_width/patch_size; + int pos_h = image_size_height/patch_size; + if (ctx->minicpmv_version == 2) { + pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 4096, pos_w * pos_h, 1); + } + else if (ctx->minicpmv_version == 3) { + pos_embed = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, 3584, pos_w * pos_h, 1); + } + ggml_set_name(pos_embed, "pos_embed"); + ggml_set_input(pos_embed); + } + + // pre-layernorm + if (ctx->has_pre_norm) { + embeddings = ggml_norm(ctx0, embeddings, eps); + ggml_set_name(embeddings, "pre_ln"); + + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.pre_ln_w), model.pre_ln_b); + } + + // loop over layers + // if (ctx->has_minicpmv_projector) { + // n_layer += 1; + // } + for (int il = 0; il < n_layer; il++) { + struct ggml_tensor * cur = embeddings; // embeddings = residual, cur = hidden_states + + //const size_t nb_q_w = model.layers[il].q_w->nb[0]; + + // layernorm1 + { + cur = ggml_norm(ctx0, cur, eps); + + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_1_w), + model.layers[il].ln_1_b); + } + + { + + struct ggml_tensor * Q = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].q_w, cur), model.layers[il].q_b); + + Q = ggml_scale_inplace(ctx0, Q, 1.0f / sqrt((float)d_head)); + Q = ggml_reshape_4d(ctx0, Q, d_head, 
n_head, num_positions, batch_size); + Q = ggml_cont(ctx0, ggml_permute(ctx0, Q, 0, 2, 1, 3)); + Q = ggml_reshape_3d(ctx0, Q, d_head, num_positions, n_head * batch_size); + + struct ggml_tensor * K = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].k_w, cur), model.layers[il].k_b); + + K = ggml_reshape_4d(ctx0, K, d_head, n_head, num_positions, batch_size); + K = ggml_cont(ctx0, ggml_permute(ctx0, K, 0, 2, 1, 3)); + K = ggml_reshape_3d(ctx0, K, d_head, num_positions, n_head * batch_size); + + struct ggml_tensor * V = + ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].v_w, cur), model.layers[il].v_b); + + V = ggml_reshape_4d(ctx0, V, d_head, n_head, num_positions, batch_size); + V = ggml_cont(ctx0, ggml_permute(ctx0, V, 1, 2, 0, 3)); + V = ggml_reshape_3d(ctx0, V, num_positions, d_head, n_head * batch_size); + + struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + // KQ = ggml_scale_inplace(ctx0, KQ, 1.0f / sqrt((float)d_head)); + KQ = ggml_soft_max_inplace(ctx0, KQ); + + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ); + KQV = ggml_reshape_4d(ctx0, KQV, d_head, num_positions, n_head, batch_size); + KQV = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + + cur = ggml_cont_3d(ctx0, KQV, hidden_size, num_positions, batch_size); + } + + // attention output + cur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].o_w, cur), model.layers[il].o_b); + + // re-add the layer input, e.g., residual + cur = ggml_add(ctx0, cur, embeddings); + + embeddings = cur; // embeddings = residual, cur = hidden_states + + // layernorm2 + { + cur = ggml_norm(ctx0, cur, eps); + + cur = ggml_add(ctx0, ggml_mul(ctx0, cur, model.layers[il].ln_2_w), model.layers[il].ln_2_b); + } + cur = ggml_mul_mat(ctx0, model.layers[il].ff_i_w, cur); + cur = ggml_add(ctx0, cur, model.layers[il].ff_i_b); + + if (ctx->use_gelu) { + cur = ggml_gelu_inplace(ctx0, cur); + } else { + cur = ggml_gelu_quick_inplace(ctx0, cur); + } + + cur = ggml_mul_mat(ctx0, model.layers[il].ff_o_w, cur); + cur = ggml_add(ctx0, cur, model.layers[il].ff_o_b); + + // residual 2 + cur = ggml_add(ctx0, embeddings, cur); + + embeddings = cur; + } + + // post-layernorm + if (ctx->has_post_norm) { + embeddings = ggml_norm(ctx0, embeddings, eps); + ggml_set_name(embeddings, "post_ln"); + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b); + } + + embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_0_b); + + embeddings = ggml_gelu_inplace(ctx0, embeddings); + + embeddings = ggml_mul_mat(ctx0, model.mm_2_w, embeddings); + embeddings = ggml_add(ctx0, embeddings, model.mm_2_b); + + ggml_build_forward_expand(gf, embeddings); + ggml_free(ctx0); + + return gf; +} + +// read and create ggml_context containing the tensors and their data +struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { + struct ggml_context * meta = NULL; + + struct gguf_init_params params = { + /*.no_alloc = */ true, + /*.ctx = */ &meta, + }; + + struct gguf_context * ctx = gguf_init_from_file(fname, params); + if (!ctx) { + throw std::runtime_error(format("%s: failed to load CLIP model from %s. 
Does this file exist?\n", __func__, fname));
+    }
+
+    if (verbosity >= 1) {
+        const int n_tensors = gguf_get_n_tensors(ctx);
+        const int n_kv = gguf_get_n_kv(ctx);
+        const int ftype = get_u32(ctx, KEY_FTYPE);
+        const std::string ftype_str = get_ftype(ftype);
+        const int idx_desc = get_key_idx(ctx, KEY_DESCRIPTION);
+        const std::string description = gguf_get_val_str(ctx, idx_desc);
+        const int idx_name = gguf_find_key(ctx, KEY_NAME);
+        if (idx_name != -1) { // make the name optional temporarily, as some of the uploaded models are missing it due to a bug
+            const std::string name = gguf_get_val_str(ctx, idx_name);
+            LOG_INF("%s: model name:   %s\n", __func__, name.c_str());
+        }
+        LOG_INF("%s: description:  %s\n", __func__, description.c_str());
+        LOG_INF("%s: GGUF version: %d\n", __func__, gguf_get_version(ctx));
+        LOG_INF("%s: alignment:    %zu\n", __func__, gguf_get_alignment(ctx));
+        LOG_INF("%s: n_tensors:    %d\n", __func__, n_tensors);
+        LOG_INF("%s: n_kv:         %d\n", __func__, n_kv);
+        LOG_INF("%s: ftype:        %s\n", __func__, ftype_str.c_str());
+        LOG_INF("\n");
+    }
+    const int n_tensors = gguf_get_n_tensors(ctx);
+
+    // kv
+    const int n_kv = gguf_get_n_kv(ctx);
+    LOG_INF("%s: loaded meta data with %d key-value pairs and %d tensors from %s\n",
+        __func__, n_kv, n_tensors, fname);
+    {
+        std::map<enum ggml_type, uint32_t> n_type;
+
+        for (int i = 0; i < n_tensors; i++) {
+            enum ggml_type type = gguf_get_tensor_type(ctx, i);
+
+            n_type[type]++;
+        }
+
+        LOG_INF("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
+        for (int i = 0; i < n_kv; i++) {
+            const char * name           = gguf_get_key(ctx, i);
+            const enum gguf_type type   = gguf_get_kv_type(ctx, i);
+            const std::string type_name =
+                type == GGUF_TYPE_ARRAY
+                ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx, i)), gguf_get_arr_n(ctx, i))
+                : gguf_type_name(type);
+
+            std::string value = gguf_kv_to_str(ctx, i);
+            const size_t MAX_VALUE_LEN = 40;
+            if (value.size() > MAX_VALUE_LEN) {
+                value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
+            }
+            replace_all(value, "\n", "\\n");
+
+            LOG_INF("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
+        }
+
+        // print type counts
+        for (auto & kv : n_type) {
+            if (kv.second == 0) {
+                continue;
+            }
+
+            LOG_INF("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
+        }
+    }
+
+    // data
+    size_t model_size = 0;
+    {
+        for (int i = 0; i < n_tensors; ++i) {
+            const char * name = gguf_get_tensor_name(ctx, i);
+            const size_t offset = gguf_get_tensor_offset(ctx, i);
+            enum ggml_type type = gguf_get_tensor_type(ctx, i);
+            struct ggml_tensor * cur = ggml_get_tensor(meta, name);
+            size_t tensor_size = ggml_nbytes(cur);
+            model_size += tensor_size;
+            if (verbosity >= 3) {
+                LOG_INF("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
+                    __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
+            }
+        }
+    }
+
+    clip_ctx * new_clip = new clip_ctx{};
+
+    // update projector type
+    {
+        int idx = gguf_find_key(ctx, KEY_PROJ_TYPE);
+        if (idx != -1) {
+            const std::string proj_type = gguf_get_val_str(ctx, idx);
+            new_clip->proj_type = clip_projector_type_from_string(proj_type);
+        } else {
+            new_clip->proj_type = PROJECTOR_TYPE_MLP;
+        }
+
+        if (new_clip->proj_type == PROJECTOR_TYPE_MLP) {
+            if (gguf_find_tensor(ctx, format(TN_LLAVA_PROJ, 3, "weight").c_str()) != -1) {
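+                // TN_LLAVA_PROJ expands to "mm.%d.%s", so this probes for a "mm.3.weight"
+                // tensor; its presence marks the Yi-style mlp+norm projector variant
+                // (the mm_1_*/mm_3_*/mm_4_* tensors above) rather than the plain MLP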
+                new_clip->proj_type = PROJECTOR_TYPE_MLP_NORM;
+            }
+        }
+    }
+
+#ifdef GGML_USE_CUDA
+    new_clip->backend = ggml_backend_cuda_init(0);
+    LOG_INF("%s: CLIP using CUDA backend\n", __func__);
+#endif
+
+#ifdef GGML_USE_METAL
+    new_clip->backend = ggml_backend_metal_init();
+    LOG_INF("%s: CLIP using Metal backend\n", __func__);
+#endif
+
+#ifdef GGML_USE_CANN
+    new_clip->backend = ggml_backend_cann_init(0);
+    LOG_INF("%s: CLIP using CANN backend\n", __func__);
+#endif
+
+#ifdef GGML_USE_VULKAN
+    new_clip->backend = ggml_backend_vk_init(0);
+    LOG_INF("%s: CLIP using Vulkan backend\n", __func__);
+#endif
+
+    if (!new_clip->backend) {
+        new_clip->backend = ggml_backend_cpu_init();
+        LOG_INF("%s: CLIP using CPU backend\n", __func__);
+    }
+
+    // model size and capabilities
+    {
+        // int idx;
+        // int idx = get_key_idx(ctx, KEY_HAS_TEXT_ENC);
+        // new_clip->has_text_encoder = gguf_get_val_bool(ctx, idx);
+        new_clip->has_text_encoder = false;
+
+        // idx = get_key_idx(ctx, KEY_HAS_VIS_ENC);
+        // new_clip->has_vision_encoder = gguf_get_val_bool(ctx, idx);
+        new_clip->has_vision_encoder = true;
+
+        // idx = gguf_find_key(ctx, KEY_HAS_LLAVA_PROJ);
+        // if (idx != -1) {
+        //     new_clip->has_llava_projector = gguf_get_val_bool(ctx, idx);
+        // }
+        new_clip->has_llava_projector = true;
+
+        // idx = gguf_find_key(ctx, KEY_HAS_MINICPMV_PROJ);
+        // if (idx != -1) {
+        //     new_clip->has_minicpmv_projector = gguf_get_val_bool(ctx, idx);
+        // }
+        new_clip->has_minicpmv_projector = false;
+
+        // idx = gguf_find_key(ctx, KEY_MINICPMV_VERSION);
+        // if (idx != -1) {
+        //     new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx);
+        // }
+
+        // GGML_ASSERT(new_clip->has_llava_projector); // see monatis/clip.cpp for image and/or text encoding for semantic search
+
+        // TODO: adjust for omni
+        new_clip->has_class_embedding = false;
+
+        GGML_ASSERT(new_clip->has_vision_encoder);
+        GGML_ASSERT(!new_clip->has_text_encoder);
+
+        // idx = get_key_idx(ctx, KEY_USE_GELU);
+        // new_clip->use_gelu = gguf_get_val_bool(ctx, idx);
+        new_clip->use_gelu = true; // TODO: to be confirmed
+
+        if (verbosity >= 1) {
+            LOG_INF("%s: text_encoder:       %d\n", __func__, new_clip->has_text_encoder);
+            LOG_INF("%s: vision_encoder:     %d\n", __func__, new_clip->has_vision_encoder);
+            LOG_INF("%s: llava_projector:    %d\n", __func__, new_clip->has_llava_projector);
+            LOG_INF("%s: minicpmv_projector: %d\n", __func__, new_clip->has_minicpmv_projector);
+            LOG_INF("%s: model size:         %.2f MB\n", __func__, model_size / 1024.0 / 1024.0);
+            LOG_INF("%s: metadata size:      %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0);
+        }
+    }
+
+    LOG_INF("%s: params backend buffer size = % 6.2f MB (%i tensors)\n", __func__, model_size / (1024.0 * 1024.0), n_tensors);
+
+    // load tensors
+    {
+        std::vector<uint8_t> read_buf;
+        struct ggml_init_params params = {
+            /*.mem_size =*/ (n_tensors + 1) * ggml_tensor_overhead(),
+            /*.mem_buffer =*/ NULL,
+            /*.no_alloc =*/ true,
+        };
+
+        new_clip->ctx_data = ggml_init(params);
+        if (!new_clip->ctx_data) {
+            LOG_ERR("%s: ggml_init() failed\n", __func__);
+            clip_free(new_clip);
+            gguf_free(ctx);
+            return nullptr;
+        }
+
+        auto fin = std::ifstream(fname, std::ios::binary);
+        if (!fin) {
+            LOG_ERR("cannot open model file for loading tensors\n");
+            clip_free(new_clip);
+            gguf_free(ctx);
+            return nullptr;
+        }
+
+        // add tensors to context
+        for (int i = 0; i < n_tensors; ++i) {
+            const char * name = gguf_get_tensor_name(ctx, i);
+            struct ggml_tensor * t = ggml_get_tensor(meta, name);
+            struct ggml_tensor * cur = ggml_dup_tensor(new_clip->ctx_data, t);
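+            // note: ctx_data was ggml_init()ed with no_alloc=true, so ggml_dup_tensor()
+            // only clones the tensor metadata here; the actual weight data is read from
+            // the file into the backend buffer in the "alloc memory and offload data"
+            // loop below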
+            ggml_set_name(cur, name);
+        }
+
+        // alloc memory and offload data
+        new_clip->params_buffer = ggml_backend_alloc_ctx_tensors(new_clip->ctx_data, new_clip->backend);
+        for (int i = 0; i < n_tensors; ++i) {
+            const char * name = gguf_get_tensor_name(ctx, i);
+            struct ggml_tensor * cur = ggml_get_tensor(new_clip->ctx_data, name);
+            const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
+            fin.seekg(offset, std::ios::beg);
+            if (!fin) {
+                LOG_ERR("%s: failed to seek for tensor %s\n", __func__, name);
+                clip_free(new_clip);
+                gguf_free(ctx);
+                return nullptr;
+            }
+            int num_bytes = ggml_nbytes(cur);
+            if (ggml_backend_buffer_is_host(new_clip->params_buffer)) {
+                // for the CPU and Metal backend, we can read directly into the tensor
+                fin.read(reinterpret_cast<char *>(cur->data), num_bytes);
+            } else {
+                // read into a temporary buffer first, then copy to device memory
+                read_buf.resize(num_bytes);
+                fin.read(reinterpret_cast<char *>(read_buf.data()), num_bytes);
+                ggml_backend_tensor_set(cur, read_buf.data(), 0, num_bytes);
+            }
+        }
+        fin.close();
+    }
+
+    // vision model
+    if (new_clip->has_vision_encoder) {
+        // load vision model
+        auto & vision_model = new_clip->vision_model;
+        auto & hparams = vision_model.hparams;
+        hparams.hidden_size    = get_u32(ctx, format(KEY_N_EMBD, "vision"));
+        hparams.n_head         = get_u32(ctx, format(KEY_N_HEAD, "vision"));
+        hparams.n_intermediate = get_u32(ctx, format(KEY_N_FF, "vision"));
+        hparams.n_layer        = get_u32(ctx, format(KEY_N_BLOCK, "vision"));
+        hparams.image_size     = get_u32(ctx, KEY_IMAGE_SIZE);
+        hparams.patch_size     = get_u32(ctx, KEY_PATCH_SIZE);
+        hparams.projection_dim = get_u32(ctx, format(KEY_PROJ_DIM, "vision"));
+        // hparams.eps = get_f32(ctx, format(KEY_LAYER_NORM_EPS, "vision"));
+        hparams.eps = 1e-06f;
+
+        /*
+        try {
+            int idx = get_key_idx(ctx, KEY_IMAGE_GRID_PINPOINTS);
+            int n = gguf_get_arr_n(ctx, idx);
+            const int32_t * pinpoints = (const int32_t *)gguf_get_arr_data(ctx, idx);
+            for (int i = 0; i < 32 && i < n && pinpoints[i] != 0; ++i) {
+                hparams.image_grid_pinpoints[i] = pinpoints[i];
+            }
+            if (n < 32)
+                hparams.image_grid_pinpoints[n] = 0;
+        } catch (std::runtime_error & ) {
+            hparams.image_grid_pinpoints[0]=0;
+        }
+
+        try {
+            int idx = get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE);
+            strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx));
+        } catch (std::runtime_error & ) {
+            strcpy(hparams.mm_patch_merge_type, "flat");
+        }
+
+        try {
+            hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6
+        } catch(const std::exception& ) {
+            hparams.image_crop_resolution = hparams.image_size;
+        }
+        */
+
+        int idx_mean = get_key_idx(ctx, KEY_IMAGE_MEAN);
+        int idx_std  = get_key_idx(ctx, KEY_IMAGE_STD);
+
+        const float * mean_data = (const float *)gguf_get_arr_data(ctx, idx_mean);
+        const float * std_data  = (const float *)gguf_get_arr_data(ctx, idx_std);
+
+        for (int i = 0; i < 3; ++i) {
+            new_clip->image_mean[i] = mean_data[i];
+            new_clip->image_std[i]  = std_data[i];
+        }
+
+        if (verbosity >= 2) {
+            LOG_INF("\n%s: vision model hparams\n", __func__);
+            LOG_INF("image_size         %d\n", hparams.image_size);
+            LOG_INF("patch_size         %d\n", hparams.patch_size);
+            LOG_INF("v_hidden_size      %d\n", hparams.hidden_size);
+            LOG_INF("v_n_intermediate   %d\n", hparams.n_intermediate);
+            LOG_INF("v_projection_dim   %d\n", hparams.projection_dim);
+            LOG_INF("v_n_head           %d\n", hparams.n_head);
+            LOG_INF("v_n_layer          %d\n", hparams.n_layer);
+            LOG_INF("v_eps              %f\n", hparams.eps);
+            LOG_INF("v_image_mean       %f %f %f\n", new_clip->image_mean[0],
new_clip->image_mean[1], new_clip->image_mean[2]); + LOG_INF("v_image_std %f %f %f\n", new_clip->image_std[0], new_clip->image_std[1], new_clip->image_std[2]); + LOG_INF("v_image_grid_pinpoints: "); + for (int i = 0; i < 32 && (hparams.image_grid_pinpoints[i] != 0); ++i) { + LOG_INF("%d ", hparams.image_grid_pinpoints[i]); + } + LOG_INF("\n"); + LOG_INF("v_mm_patch_merge_type: %s\n", hparams.mm_patch_merge_type); + + } + + try { + vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD); + new_clip->has_class_embedding = true; + } catch (const std::exception& /*e*/) { + new_clip->has_class_embedding = false; + } + + try { + vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight")); + vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias")); + new_clip->has_pre_norm = true; + } catch (std::exception & /*e*/) { + new_clip->has_pre_norm = false; + } + + try { + vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight")); + vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias")); + new_clip->has_post_norm = true; + } catch (std::exception & /*e*/) { + new_clip->has_post_norm = false; + } + + try { + vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS); + new_clip->has_patch_bias = true; + } catch (std::exception & /*e*/) { + new_clip->has_patch_bias = false; + } + + try { + vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD); + vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v")); + } catch(const std::exception& /*e*/) { + LOG_ERR("%s: failed to load vision model tensors\n", __func__); + } + + // LLaVA projection + if (new_clip->proj_type == PROJECTOR_TYPE_MLP || new_clip->proj_type == PROJECTOR_TYPE_MLP_NORM) { + vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight")); + vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias")); + try { + // Yi-type llava + vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "weight")); + vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "bias")); + } catch (std::runtime_error & /*e*/) { } + try { + // missing in Yi-type llava + vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight")); + vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias")); + } catch (std::runtime_error & /*e*/) { } + try { + // Yi-type llava + vision_model.mm_3_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "weight")); + vision_model.mm_3_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "bias")); + } catch (std::runtime_error & /*e*/) { } + try { + // Yi-type llava + vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight")); + vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias")); + } catch (std::runtime_error & /*e*/) { } + try { + vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE); + // LOG_INF("%s: image_newline tensor (llava-1.6) found\n", __func__); + } catch (std::runtime_error & /*e*/) { } + // TODO: omni-vlm patch code here: + vision_model.mm_head_attn_in_w = get_tensor(new_clip->ctx_data, "v.head.attention.in_weight"); + vision_model.mm_head_attn_in_b = get_tensor(new_clip->ctx_data, "v.head.attention.in_bias"); + vision_model.mm_head_attn_out_w = 
get_tensor(new_clip->ctx_data, "v.head.attention.out.weight"); + vision_model.mm_head_attn_out_b = get_tensor(new_clip->ctx_data, "v.head.attention.out.bias"); + vision_model.mm_head_ln_w = get_tensor(new_clip->ctx_data, "v.head.ln.weight"); + vision_model.mm_head_ln_b = get_tensor(new_clip->ctx_data, "v.head.ln.bias"); + vision_model.mm_head_ffd_w = get_tensor(new_clip->ctx_data, "v.head.ffn_down.weight"); + vision_model.mm_head_ffd_b = get_tensor(new_clip->ctx_data, "v.head.ffn_down.bias"); + vision_model.mm_head_ffu_w = get_tensor(new_clip->ctx_data, "v.head.ffn_up.weight"); + vision_model.mm_head_ffu_b = get_tensor(new_clip->ctx_data, "v.head.ffn_up.bias"); + vision_model.mm_head_prob = get_tensor(new_clip->ctx_data, "v.head.probe"); + + } else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) { + // MobileVLM projection + vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight")); + vision_model.mm_model_mlp_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "bias")); + vision_model.mm_model_mlp_3_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "weight")); + vision_model.mm_model_mlp_3_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 3, "bias")); + vision_model.mm_model_block_1_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "0.weight")); + vision_model.mm_model_block_1_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.weight")); + vision_model.mm_model_block_1_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 0, "1.bias")); + vision_model.mm_model_block_1_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.weight")); + vision_model.mm_model_block_1_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc1.bias")); + vision_model.mm_model_block_1_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.weight")); + vision_model.mm_model_block_1_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 1, "fc2.bias")); + vision_model.mm_model_block_1_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "0.weight")); + vision_model.mm_model_block_1_block_2_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.weight")); + vision_model.mm_model_block_1_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 1, 2, "1.bias")); + vision_model.mm_model_block_2_block_0_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "0.weight")); + vision_model.mm_model_block_2_block_0_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.weight")); + vision_model.mm_model_block_2_block_0_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 0, "1.bias")); + vision_model.mm_model_block_2_block_1_fc1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.weight")); + vision_model.mm_model_block_2_block_1_fc1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc1.bias")); + vision_model.mm_model_block_2_block_1_fc2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.weight")); + vision_model.mm_model_block_2_block_1_fc2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 1, "fc2.bias")); + vision_model.mm_model_block_2_block_2_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "0.weight")); + vision_model.mm_model_block_2_block_2_1_w = 
get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.weight")); + vision_model.mm_model_block_2_block_2_1_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_BLOCK, 2, 2, "1.bias")); + } + else if (new_clip->proj_type == PROJECTOR_TYPE_LDPV2) + { + // MobilVLM_V2 projection + vision_model.mm_model_mlp_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "weight")); + vision_model.mm_model_mlp_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 0, "bias")); + vision_model.mm_model_mlp_2_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "weight")); + vision_model.mm_model_mlp_2_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 2, "bias")); + vision_model.mm_model_peg_0_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "weight")); + vision_model.mm_model_peg_0_b = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_PEG, 0, "bias")); + } + else if (new_clip->proj_type == PROJECTOR_TYPE_RESAMPLER) { + // vision_model.mm_model_pos_embed = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD); + vision_model.mm_model_pos_embed_k = get_tensor(new_clip->ctx_data, TN_MINICPMV_POS_EMBD_K); + vision_model.mm_model_query = get_tensor(new_clip->ctx_data, TN_MINICPMV_QUERY); + vision_model.mm_model_proj = get_tensor(new_clip->ctx_data, TN_MINICPMV_PROJ); + vision_model.mm_model_kv_proj = get_tensor(new_clip->ctx_data, TN_MINICPMV_KV_PROJ); + vision_model.mm_model_attn_q_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "q", "weight")); + vision_model.mm_model_attn_k_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "k", "weight")); + vision_model.mm_model_attn_v_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "v", "weight")); + vision_model.mm_model_attn_q_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "q", "bias")); + vision_model.mm_model_attn_k_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "k", "bias")); + vision_model.mm_model_attn_v_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "v", "bias")); + vision_model.mm_model_attn_o_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "out", "weight")); + vision_model.mm_model_attn_o_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_ATTN, "out", "bias")); + vision_model.mm_model_ln_q_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "q", "weight")); + vision_model.mm_model_ln_q_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "q", "bias")); + vision_model.mm_model_ln_kv_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "kv", "weight")); + vision_model.mm_model_ln_kv_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "kv", "bias")); + vision_model.mm_model_ln_post_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "weight")); + vision_model.mm_model_ln_post_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "bias")); + } + else { + std::string proj_type = PROJECTOR_TYPE_NAMES[new_clip->proj_type]; + throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str())); + } + + vision_model.layers.resize(hparams.n_layer); + + for (int il = 0; il < hparams.n_layer; ++il) { + auto & layer = vision_model.layers[il]; + layer.k_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "weight")); + layer.q_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "weight")); + layer.v_w = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "weight")); + layer.o_w = 
get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "weight")); + layer.ln_1_w = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "weight")); + layer.ln_2_w = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "weight")); + layer.ff_i_w = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "weight")); + layer.ff_o_w = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "weight")); + layer.k_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_K, "v", il, "bias")); + layer.q_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_Q, "v", il, "bias")); + layer.v_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_V, "v", il, "bias")); + layer.o_b = get_tensor(new_clip->ctx_data, format(TN_ATTN_OUTPUT, "v", il, "bias")); + layer.ln_1_b = get_tensor(new_clip->ctx_data, format(TN_LN_1, "v", il, "bias")); + layer.ln_2_b = get_tensor(new_clip->ctx_data, format(TN_LN_2, "v", il, "bias")); + layer.ff_i_b = get_tensor(new_clip->ctx_data, format(TN_FFN_DOWN, "v", il, "bias")); + layer.ff_o_b = get_tensor(new_clip->ctx_data, format(TN_FFN_UP, "v", il, "bias")); + } + } + + ggml_free(meta); + + new_clip->ctx_gguf = ctx; + + // measure mem requirement and allocate + { + new_clip->buf_compute_meta.resize(GGML_DEFAULT_GRAPH_SIZE * ggml_tensor_overhead() + ggml_graph_overhead()); + new_clip->compute_alloc = ggml_gallocr_new(ggml_backend_get_default_buffer_type(new_clip->backend)); + clip_image_f32_batch batch; + batch.size = 1; + ggml_cgraph * gf = clip_image_build_graph(new_clip, &batch, nullptr, false); + ggml_gallocr_reserve(new_clip->compute_alloc, gf); + size_t compute_memory_buffer_size = ggml_gallocr_get_buffer_size(new_clip->compute_alloc, 0); + LOG_INF("%s: compute allocated memory: %.2f MB\n", __func__, compute_memory_buffer_size /1024.0/1024.0); + } + + return new_clip; +} + +void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size) { + ctx_clip->load_image_size = load_image_size; +} + +struct clip_image_size * clip_image_size_init() { + struct clip_image_size * load_image_size = new struct clip_image_size(); + load_image_size->width = 448; + load_image_size->height = 448; + return load_image_size; +} + +struct clip_image_u8 * clip_image_u8_init() { + return new clip_image_u8(); +} + +struct clip_image_f32 * clip_image_f32_init() { + return new clip_image_f32(); +} + +void clip_image_u8_free(struct clip_image_u8 * img) { delete img; } +void clip_image_f32_free(struct clip_image_f32 * img) { delete img; } +void clip_image_u8_batch_free(struct clip_image_u8_batch * batch) { + if (batch->size > 0) { + delete[] batch->data; + batch->size = 0; + } +} +void clip_image_f32_batch_free(struct clip_image_f32_batch * batch) { + if (batch->size > 0) { + delete[] batch->data; + batch->size = 0; + } +} + +static void build_clip_img_from_data(const stbi_uc * data, int nx, int ny, clip_image_u8 * img) { + img->nx = nx; + img->ny = ny; + img->buf.resize(3 * nx * ny); + memcpy(img->buf.data(), data, img->buf.size()); +} + +bool clip_image_load_from_file(const char * fname, clip_image_u8 * img) { + int nx, ny, nc; + auto * data = stbi_load(fname, &nx, &ny, &nc, 3); + if (!data) { + LOG_ERR("%s: failed to load image '%s'\n", __func__, fname); + return false; + } + build_clip_img_from_data(data, nx, ny, img); + stbi_image_free(data); + return true; +} + +bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img) { + int nx, ny, nc; + auto * data = stbi_load_from_memory(bytes, bytes_length, &nx, &ny, 
&nc, 3);
+    if (!data) {
+        LOG_ERR("%s: failed to decode image bytes\n", __func__);
+        return false;
+    }
+    build_clip_img_from_data(data, nx, ny, img);
+    stbi_image_free(data);
+    return true;
+}
+
+// Linear interpolation between two points
+inline float clip_lerp(float s, float e, float t) {
+    return s + (e - s) * t;
+}
+// Bilinear resize function
+static void bilinear_resize(const clip_image_u8& src, clip_image_u8& dst, int target_width, int target_height) {
+    dst.nx = target_width;
+    dst.ny = target_height;
+    dst.buf.resize(3 * target_width * target_height);
+
+    float x_ratio = static_cast<float>(src.nx - 1) / target_width;
+    float y_ratio = static_cast<float>(src.ny - 1) / target_height;
+
+    for (int y = 0; y < target_height; y++) {
+        for (int x = 0; x < target_width; x++) {
+            float px = x_ratio * x;
+            float py = y_ratio * y;
+            int x_floor = static_cast<int>(px);
+            int y_floor = static_cast<int>(py);
+            float x_lerp = px - x_floor;
+            float y_lerp = py - y_floor;
+
+            for (int c = 0; c < 3; c++) {
+                float top = clip_lerp(
+                    static_cast<float>(src.buf[3 * (y_floor * src.nx + x_floor) + c]),
+                    static_cast<float>(src.buf[3 * (y_floor * src.nx + (x_floor + 1)) + c]),
+                    x_lerp
+                );
+                float bottom = clip_lerp(
+                    static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + x_floor) + c]),
+                    static_cast<float>(src.buf[3 * ((y_floor + 1) * src.nx + (x_floor + 1)) + c]),
+                    x_lerp
+                );
+                dst.buf[3 * (y * target_width + x) + c] = static_cast<uint8_t>(clip_lerp(top, bottom, y_lerp));
+            }
+        }
+    }
+}
+
+// Normalize image to float32 - careful with pytorch .to(model.device, dtype=torch.float16) - this sometimes reduces precision (32>16>32), sometimes not
+static void normalize_image_u8_to_f32(const clip_image_u8* src, clip_image_f32* dst, const float mean[3], const float std[3]) {
+    dst->nx = src->nx;
+    dst->ny = src->ny;
+    dst->buf.resize(src->buf.size());
+
+    for (size_t i = 0; i < src->buf.size(); ++i) {
+        int c = i % 3; // rgb
+        dst->buf[i] = (static_cast<float>(src->buf[i]) / 255.0f - mean[c]) / std[c];
+    }
+}
+
+inline int clip(int x, int lower, int upper) {
+    return std::max(lower, std::min(x, upper));
+}
+
+static bool bicubic_resize(const clip_image_u8 &img, clip_image_u8 &dst, int target_width, int target_height) {
+    const int nx = img.nx;
+    const int ny = img.ny;
+
+    dst.nx = target_width;
+    dst.ny = target_height;
+    dst.buf.resize(3 * target_width * target_height);
+
+    float Cc;
+    float C[5];
+    float d0, d2, d3, a0, a1, a2, a3;
+    int i, j, k, jj;
+    int x, y;
+    float dx, dy;
+    float tx, ty;
+
+    tx = (float)nx / (float)target_width;
+    ty = (float)ny / (float)target_height;
+
+    // Bicubic interpolation; adapted from ViT.cpp, inspired from :
+    //  -> https://github.com/yglukhov/bicubic-interpolation-image-processing/blob/master/libimage.c#L36
+    //  -> https://en.wikipedia.org/wiki/Bicubic_interpolation
+
+    for (i = 0; i < target_height; i++) {
+        for (j = 0; j < target_width; j++) {
+            x = (int)(tx * j);
+            y = (int)(ty * i);
+
+            dx = tx * j - x;
+            dy = ty * i - y;
+
+            for (k = 0; k < 3; k++) {
+                for (jj = 0; jj <= 3; jj++) {
+                    d0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x - 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
+                    d2 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 1, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
+                    d3 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x + 2, 0, nx - 1)) * 3 + k] - img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
+                    a0 = img.buf[(clip(y - 1 + jj, 0, ny - 1) * nx + clip(x, 0, nx - 1)) * 3 + k];
k]; + + a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3; + a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2; + a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3; + + C[jj] = a0 + a1 * dx + a2 * dx * dx + a3 * dx * dx * dx; + + d0 = C[0] - C[1]; + d2 = C[2] - C[1]; + d3 = C[3] - C[1]; + a0 = C[1]; + a1 = -1.0 / 3 * d0 + d2 - 1.0 / 6 * d3; + a2 = 1.0 / 2 * d0 + 1.0 / 2 * d2; + a3 = -1.0 / 6 * d0 - 1.0 / 2 * d2 + 1.0 / 6 * d3; + Cc = a0 + a1 * dy + a2 * dy * dy + a3 * dy * dy * dy; + + const uint8_t Cc2 = std::min(std::max(std::round(Cc), 0.0f), 255.0f); + dst.buf[(i * target_width + j) * 3 + k] = float(Cc2); + } + } + } + } + + return true; +} + +// llava-1.6 type of resize_and_pad (black) +static void resize_and_pad_image(const clip_image_u8& image, clip_image_u8 &image_output, const std::pair& target_resolution) { + int target_width = target_resolution.first; + int target_height = target_resolution.second; + + float scale_w = static_cast(target_width) / image.nx; + float scale_h = static_cast(target_height) / image.ny; + + int new_width, new_height; + + if (scale_w < scale_h) { + new_width = target_width; + new_height = std::min(static_cast(std::ceil(image.ny * scale_w)), target_height); + } else { + new_height = target_height; + new_width = std::min(static_cast(std::ceil(image.nx * scale_h)), target_width); + } + + clip_image_u8 resized_image; + // bilinear_resize(image, resized_image, new_width, new_height); + bicubic_resize(image, resized_image, new_width, new_height); + + clip_image_u8 padded_image; + padded_image.nx = target_width; + padded_image.ny = target_height; + padded_image.buf.resize(3 * target_width * target_height, 0); // Initialize with black + + // Calculate padding offsets + int pad_x = (target_width - new_width) / 2; + int pad_y = (target_height - new_height) / 2; + + // Copy the resized image into the center of the padded buffer + for (int y = 0; y < new_height; ++y) { + for (int x = 0; x < new_width; ++x) { + for (int c = 0; c < 3; ++c) { + padded_image.buf[3 * ((y + pad_y) * target_width + (x + pad_x)) + c] = resized_image.buf[3 * (y * new_width + x) + c]; + } + } + } + image_output = std::move(padded_image); +} + +/** + * Selects the best resolution from a list of possible resolutions based on the original size. + * + * @param original_size The original size of the image in the format (width, height). + * @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...]. + * @return The best fit resolution in the format (width, height). 
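+ *
+ * Example (hypothetical numbers, for illustration only): for an 800x600
+ * original and candidates {(672,672), (336,1344)}, the first scales the image
+ * by min(672/800, 672/600) = 0.84 to 672x504 (effective 338688 px) while the
+ * second only reaches 336x252 (effective 84672 px), so (672,672) is selected.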
+ */ +static std::pair select_best_resolution(const std::pair & original_size, const std::vector> & possible_resolutions) { + int original_width = original_size.first; + int original_height = original_size.second; + std::pair best_fit; + int max_effective_resolution = 0; + int min_wasted_resolution = std::numeric_limits::max(); + + for (const auto& resolution : possible_resolutions) { + int width = resolution.first; + int height = resolution.second; + float scale = std::min(static_cast(width) / original_width, static_cast(height) / original_height); + int downscaled_width = static_cast(original_width * scale); + int downscaled_height = static_cast(original_height * scale); + int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height); + int wasted_resolution = (width * height) - effective_resolution; + // LOG_INF("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution); + if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) { + max_effective_resolution = effective_resolution; + min_wasted_resolution = wasted_resolution; + best_fit = resolution; + } + } + + return best_fit; +} + +static std::vector divide_to_patches_u8(const clip_image_u8 & image, int patch_size) { + std::vector patches; + int width = image.nx; + int height = image.ny; + for (int i = 0; i < height; i += patch_size) { + for (int j = 0; j < width; j += patch_size) { + clip_image_u8 *patch = clip_image_u8_init(); + patch->nx = std::min(patch_size, width - j); + patch->ny = std::min(patch_size, height - i); + patch->buf.resize(3 * patch->nx * patch->ny); + for (int y = 0; y < patch->ny; ++y) { + for (int x = 0; x < patch->nx; ++x) { + for (int c = 0; c < 3; ++c) { + patch->buf[3 * (y * patch->nx + x) + c] = image.buf[3 * ((i + y) * width + (j + x)) + c]; + } + } + } + patches.push_back(patch); + } + } + return patches; +} + +static int ensure_divide(int length, int patch_size) { + return std::max(static_cast(std::round(static_cast(length) / patch_size) * patch_size), patch_size); +} + +static std::pair uhd_find_best_resize(std::pair original_size, int scale_resolution, int patch_size, bool allow_upscale = false) { + int width = original_size.first; + int height = original_size.second; + if ((width * height > scale_resolution * scale_resolution) || allow_upscale) { + float r = static_cast(width) / height; + height = static_cast(scale_resolution / std::sqrt(r)); + width = static_cast(height * r); + } + int best_width = ensure_divide(width, patch_size); + int best_height = ensure_divide(height, patch_size); + return std::make_pair(best_width, best_height); +} + +static std::pair uhd_get_refine_size(std::pair original_size, std::pair grid, int scale_resolution, int patch_size, bool allow_upscale = false) { + int width, height; + std::tie(width, height) = original_size; + int grid_x, grid_y; + std::tie(grid_x, grid_y) = grid; + + int refine_width = ensure_divide(width, grid_x); + int refine_height = ensure_divide(height, grid_y); + + int grid_width = refine_width / grid_x; + int grid_height = refine_height / grid_y; + + // auto best_grid_size = find_best_resize(std::make_tuple(grid_width, grid_height), scale_resolution, patch_size, allow_upscale); (old line) + auto best_grid_size = uhd_find_best_resize(std::make_pair(grid_width, grid_height), scale_resolution, 
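+                                                // Worked example for the resize helpers above (illustrative
+                                                // numbers only): uhd_find_best_resize on a 1920x1080 input with
+                                                // scale_resolution = 448 and patch_size = 14 gives
+                                                // r = 1920/1080 ~ 1.78, height = 448/sqrt(r) ~ 336, width ~ 597;
+                                                // ensure_divide then snaps both to patch multiples:
+                                                // round(597/14)*14 = 602 and round(336/14)*14 = 336.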
patch_size, allow_upscale); // (new line) => fixes conversion for make_tuple to make_pair + int best_grid_width, best_grid_height; + std::tie(best_grid_width, best_grid_height) = best_grid_size; + + // std::pair refine_size = std::make_tuple(best_grid_width * grid_x, best_grid_height * grid_y); (old line) + std::pair refine_size = std::make_pair(best_grid_width * grid_x, best_grid_height * grid_y); // (new line) + return refine_size; +} + +static std::pair uhd_best_grid(const int max_slice_nums, const int multiple, const float log_ratio) { + std::vector candidate_split_grids_nums; + for (int i : {multiple - 1, multiple, multiple + 1}) { + if (i == 1 || i > max_slice_nums) { + continue; + } + candidate_split_grids_nums.push_back(i); + } + + std::vector> candidate_grids; + for (int split_grids_nums : candidate_split_grids_nums) { + int m = 1; + while (m <= split_grids_nums) { + if (split_grids_nums % m == 0) { + candidate_grids.emplace_back(m, split_grids_nums / m); + } + ++m; + } + } + + std::pair best_grid{1, 1}; + float min_error = std::numeric_limits::infinity(); + for (const auto& grid : candidate_grids) { + float error = std::abs(log_ratio - std::log(1.0 * grid.first / grid.second)); + if (error < min_error) { + best_grid = grid; + min_error = error; + } + } + return best_grid; +} + +// inspired from LLaVA-UHD: +// -> https://arxiv.org/pdf/2403.11703 +// -> https://github.com/thunlp/LLaVA-UHD +// -> https://github.com/thunlp/LLaVA-UHD/blob/302301bc2175f7e717fb8548516188e89f649753/llava_uhd/train/llava-uhd/slice_logic.py#L118 +static std::vector> uhd_slice_image(const clip_image_u8 * img, const int max_slice_nums=9, const int scale_resolution=448, const int patch_size=14) { + const std::pair original_size={img->nx,img->ny}; + const int original_width = img->nx; + const int original_height = img->ny; + const float log_ratio = log(1.0*original_width/original_height); + const float ratio = 1.0 * original_width * original_height/ (scale_resolution * scale_resolution); + const int multiple = fmin(ceil(ratio), max_slice_nums); + + std::vector> images; + LOG_INF("%s: multiple %d\n", __func__, multiple); + images.push_back(std::vector()); + + if (multiple <= 1) { + auto best_size = uhd_find_best_resize(original_size, scale_resolution, patch_size, true); + clip_image_u8 * source_image = clip_image_u8_init(); + bicubic_resize(*img, *source_image, best_size.first, best_size.second); + // source_image = image.resize(best_size, Image.Resampling.BICUBIC) + images[images.size()-1].push_back(source_image); + } + else if (multiple > 1) { + auto best_size = uhd_find_best_resize(original_size, scale_resolution, patch_size); + clip_image_u8 * source_image = clip_image_u8_init(); + bicubic_resize(*img, *source_image, best_size.first, best_size.second); + // source_image = image.copy().resize(best_resize, Image.Resampling.BICUBIC) + LOG_INF("%s: image_size: %d %d; source_image size: %d %d\n", __func__, img->nx, img->ny, best_size.first, best_size.second); + images[images.size()-1].push_back(source_image); + + std::pair best_grid = uhd_best_grid(max_slice_nums, multiple, log_ratio); + LOG_INF("%s: image_size: %d %d; best_grid: %d %d\n", __func__, img->nx, img->ny, best_grid.first, best_grid.second); + + auto refine_size = uhd_get_refine_size(original_size, best_grid, scale_resolution, patch_size, true); + clip_image_u8 * refine_image = clip_image_u8_init(); + bicubic_resize(*img, *refine_image, refine_size.first, refine_size.second); + + LOG_INF("%s: refine_image_size: %d %d; refine_size: %d %d\n", __func__, 
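+        // Note on uhd_best_grid above: it enumerates factor pairs (m, n) of the
+        // slice counts near `multiple` and keeps the pair whose log(m/n) is
+        // closest to the image's log aspect ratio. Illustrative case: for a
+        // 16:9 image with multiple = 9, the candidates include (2,4), (4,2) and
+        // (3,3), and (4,2) wins since log(4/2) ~ 0.69 is nearest log(16/9) ~ 0.58.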
refine_image->nx, refine_image->ny, refine_size.first, refine_size.second); + + // split_to_patches + int width = refine_image->nx; + int height = refine_image->ny; + int grid_x = int(width / best_grid.first); + int grid_y = int(height / best_grid.second); + for (int patches_i = 0, ic = 0; patches_i < height && ic < best_grid.second; patches_i += grid_y, ic += 1){ + images.push_back(std::vector()); + for(int patches_j = 0, jc = 0; patches_j < width && jc < best_grid.first; patches_j += grid_x, jc += 1){ + clip_image_u8 * patch = clip_image_u8_init(); + patch->nx = grid_x; + patch->ny = grid_y; + patch->buf.resize(3 * patch->nx * patch->ny); + for (int y = patches_i; y < patches_i + grid_y; ++y) { + for (int x = patches_j; x < patches_j + grid_x; ++x) { + const int i = 3 * (y * refine_image->nx + x); + const int j = 3 * ((y-patches_i) * patch->nx + (x-patches_j)); + patch->buf[j] = refine_image->buf[i]; + patch->buf[j+1] = refine_image->buf[i+1]; + patch->buf[j+2] = refine_image->buf[i+2]; + } + } + images[images.size()-1].push_back(patch); + } + } + } + return images; +} + +int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip) { + const int max_slice_nums=9; + const int scale_resolution=448; + const int original_width = ctx_clip->load_image_size->width; + const int original_height = ctx_clip->load_image_size->height; + const float log_ratio = log(1.0*original_width/original_height); + const float ratio = 1.0 * original_width * original_height/ (scale_resolution * scale_resolution); + const int multiple = fmin(ceil(ratio), max_slice_nums); + std::pair best_grid = uhd_best_grid(max_slice_nums, multiple, log_ratio); + return best_grid.first; +} + +// returns the normalized float tensor for llava-1.5, for spatial_unpad with anyres processing for llava-1.6 it returns the normalized image patch tensors as a vector +// res_imgs memory is being allocated here, previous allocations will be freed if found +bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, clip_image_f32_batch * res_imgs) { + // develop for omni-vlm, WIP + // TODO: a better structure + res_imgs->data = new clip_image_f32[1]; + res_imgs->size = 1; + + clip_image_u8 sampled_img; + bool ret = bicubic_resize(*img, sampled_img, ctx->vision_model.hparams.image_size, ctx->vision_model.hparams.image_size); + + clip_image_f32 img_f32; + normalize_image_u8_to_f32(&sampled_img, &img_f32, ctx->image_mean, ctx->image_std); + + res_imgs->data[0] = img_f32; + + return true; + + // result is not exactly same compared to PIL.resample(bicubic) + // need to carefully assess the impact + + // cout << "\t DEBUG DEBUG SAMPLED IMAGE" << endl; + // cout << sampled_img.nx << '\t' << sampled_img.ny << endl; + // cout << sampled_img.buf.size() << endl; + // cout << "\t\tout" << endl; + // for(size_t i=200000; i<200050;i++) { + // cout << (int)(sampled_img.buf[i]) << ' '; + // } + // cout << endl; + // int max_slice_nums = 9; + // + // std::vector> imgs = uhd_slice_image(img, max_slice_nums); + // res_imgs->size = 0; + // for (size_t i = 0; i < imgs.size(); ++i){ + // res_imgs->size += imgs[i].size(); + // } + // res_imgs->data = new clip_image_f32[res_imgs->size]; + // int idx = 0; + // for (size_t i = 0; i < imgs.size(); ++i) { + // for (size_t j = 0; j < imgs[i].size(); ++j) { + // LOG_DBG("%s: %d %d\n", __func__,imgs[i][j]->nx,imgs[i][j]->ny); + // clip_image_f32 * res = clip_image_f32_init(); + // normalize_image_u8_to_f32(imgs[i][j], res, ctx->image_mean, ctx->image_std); + // res_imgs->data[idx++] = *res; + // 
clip_image_f32_free(res); + // } + // } + // return true; + + // below is just ref + /* + bool pad_to_square = true; + if (!ctx->has_vision_encoder) { + LOG_ERR("This gguf file seems to have no vision encoder\n"); + return false; + } + auto & params = ctx->vision_model.hparams; + // The model config actually contains all we need to decide on how to preprocess, here we automatically switch to the new llava-1.6 preprocessing + if (strcmp(params.mm_patch_merge_type, "spatial_unpad") == 0) { + pad_to_square = false; + } + // free the previous res_imgs if any set + if (res_imgs->size > 0) { + clip_image_f32_batch_free(res_imgs); + } + res_imgs->data = nullptr; + res_imgs->size = 0; + + // the logic below is to pad the shorter side to the longer side with a background color: rgb(122, 116, 104) + // see https://github.com/haotian-liu/LLaVA/blob/e854a2bf85118c504f6f16bf5c3c7c92f8fa8c6b/llava/conversation.py#L113-L156 + + clip_image_u8 * temp = clip_image_u8_init(); // we will keep the input image data here temporarily + if (pad_to_square && img->nx != img->ny) { + int longer_side = std::max(img->nx, img->ny); + temp->nx = longer_side; + temp->ny = longer_side; + temp->buf.resize(3 * longer_side * longer_side); + const uint8_t bc[3] = {122, 116, 104}; // background color in RGB from LLaVA (this is the mean rgb color * 255) + + // fill with background color + for (size_t i = 0; i < temp->buf.size(); i++) { + temp->buf[i] = bc[i % 3]; + } + + // copy from the input image + for (int y = 0; y < img->ny; y++) { + for (int x = 0; x < img->nx; x++) { + const int i = 3 * (y * img->nx + x); + const int j = 3 * (y * temp->nx + x); + temp->buf[j] = img->buf[i]; + temp->buf[j+1] = img->buf[i+1]; + temp->buf[j+2] = img->buf[i+2]; + } + } + } else { + if (params.image_grid_pinpoints[0] != 0) { + // "spatial_unpad" with "anyres" processing for llava-1.6 + std::vector> possible_resolutions; + for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) { + possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]}); + } + std::pair best_resolution = select_best_resolution({img->nx, img->ny}, possible_resolutions); + // clip_image_save_to_bmp(*img, "input.bmp"); + resize_and_pad_image(*img, *temp, best_resolution); // we do not pad with mean-bg color anymore in llava-1.6 + // clip_image_save_to_bmp(*temp, "resized.bmp"); + // visually verify normalized image: + // normalize_image_u8_to_f32(*temp, *res, ctx->image_mean, ctx->image_std); + // { + // clip_image_u8 * temp2 = clip_image_u8_init(); + // clip_image_convert_f32_to_u8(*res, *temp2); + // clip_image_save_to_bmp(*temp2, "resized_normalized_f32.bmp"); + // clip_image_u8_free(temp2); + // } + + std::vector patches = divide_to_patches_u8(*temp, params.image_size); // prepare spatial sorted main patches of image_size each (336 in llava-1.6) + + clip_image_u8 *image_original_resize = clip_image_u8_init(); + // bilinear_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square + bicubic_resize(*img, *image_original_resize, params.image_size, params.image_size); // in python this is "shortest_edge", but all CLIP are square + patches.insert(patches.begin(), image_original_resize); + // clip_image_f32_batch_init(patches.size()); + res_imgs->size = patches.size(); + res_imgs->data = new clip_image_f32[res_imgs->size]; + int num=0; + for (auto& patch : patches) { + normalize_image_u8_to_f32(patch, &res_imgs->data[num], ctx->image_mean, 
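+                // Normalization note (applies to normalize_image_u8_to_f32 above):
+                // each channel value v in [0,255] maps to (v/255 - mean[c]) / std[c];
+                // with the mean = std = 0.5 defaults used by this example's converter
+                // that is v/127.5 - 1, so pixels land in [-1, 1] (v = 0 gives -1,
+                // v = 255 gives 1).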
ctx->image_std); + num++; + } + + for (size_t i = 0; i < patches.size(); i++) { + // LOG_DBG("patch %d: %d %d\n", i, patches[i]->nx, patches[i]->ny); + clip_image_u8_free(patches[i]); + } + + clip_image_u8_free(temp); + + return true; + } else { + temp->nx = img->nx; + temp->ny = img->ny; + temp->buf.resize(img->buf.size()); + memcpy(temp->buf.data(), img->buf.data(), temp->buf.size()); + } + } + + const int nx = temp->nx; + const int ny = temp->ny; + // clip_image_save_to_bmp(*temp, "resized_vanilla.bmp"); + + const int nx2 = ctx->vision_model.hparams.image_size; + const int ny2 = ctx->vision_model.hparams.image_size; + clip_image_f32 * res = clip_image_f32_init(); + res->nx = nx2; + res->ny = ny2; + res->buf.resize(3 * nx2 * ny2); + + const float scale = std::max(nx, ny) / (float)ctx->vision_model.hparams.image_size; + + const int nx3 = int(nx / scale + 0.5f); + const int ny3 = int(ny / scale + 0.5f); + + const auto & m3 = ctx->image_mean; // {0.48145466f, 0.4578275f, 0.40821073f}; + const auto & s3 = ctx->image_std; // {0.26862954f, 0.26130258f, 0.27577711f}; + + for (int y = 0; y < ny3; y++) { + for (int x = 0; x < nx3; x++) { + for (int c = 0; c < 3; c++) { + // linear interpolation + const float sx = (x + 0.5f) * scale - 0.5f; + const float sy = (y + 0.5f) * scale - 0.5f; + + const int x0 = std::max(0, (int)std::floor(sx)); + const int y0 = std::max(0, (int)std::floor(sy)); + + const int x1 = std::min(x0 + 1, nx - 1); + const int y1 = std::min(y0 + 1, ny - 1); + + const float dx = sx - x0; + const float dy = sy - y0; + + const int j00 = 3 * (y0 * nx + x0) + c; + const int j01 = 3 * (y0 * nx + x1) + c; + const int j10 = 3 * (y1 * nx + x0) + c; + const int j11 = 3 * (y1 * nx + x1) + c; + + const float v00 = temp->buf[j00]; + const float v01 = temp->buf[j01]; + const float v10 = temp->buf[j10]; + const float v11 = temp->buf[j11]; + + const float v0 = v00 * (1.0f - dx) + v01 * dx; + const float v1 = v10 * (1.0f - dx) + v11 * dx; + + const float v = v0 * (1.0f - dy) + v1 * dy; + + const uint8_t v2 = std::min(std::max(std::round(v), 0.0f), 255.0f); + + const int i = 3 * (y * nx3 + x) + c; + + res->buf[i] = ((float(v2) / 255.0f) - m3[c]) / s3[c]; + } + } + } + clip_image_u8_free(temp); + + // { + // clip_image_u8 * temp2 = clip_image_u8_init(); + // clip_image_convert_f32_to_u8(*res, *temp2); + // clip_image_save_to_bmp(*temp2, "resized_normalized_f32_vanilla.bmp"); + // clip_image_u8_free(temp2); + // } + // res_imgs.push_back(res); + + res_imgs->size = 1; + res_imgs->data = new clip_image_f32[res_imgs->size]; + res_imgs->data[0] = *res; + clip_image_f32_free(res); + + return true; + */ +} + +ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx) { + return ctx->vision_model.image_newline; +} + +void clip_free(clip_ctx * ctx) { + ggml_free(ctx->ctx_data); + gguf_free(ctx->ctx_gguf); + + ggml_backend_buffer_free(ctx->params_buffer); + ggml_backend_free(ctx->backend); + ggml_gallocr_free(ctx->compute_alloc); + delete ctx; +} + +size_t clip_embd_nbytes(const struct clip_ctx * ctx) { + return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float); +} + +int32_t clip_image_size(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.image_size; +} + +int32_t clip_patch_size(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.patch_size; +} + +int32_t clip_hidden_size(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.hidden_size; +} + +const char * clip_patch_merge_type(const struct clip_ctx * ctx) { + return 
ctx->vision_model.hparams.mm_patch_merge_type; +} + +const int32_t * clip_image_grid(const struct clip_ctx * ctx) { + return ctx->vision_model.hparams.image_grid_pinpoints; +} + +int clip_n_patches(const struct clip_ctx * ctx) { + const auto & params = ctx->vision_model.hparams; + + int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size); + + if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2) { + n_patches /= 4; + } else if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) { + if (ctx->minicpmv_version == 2) { + n_patches = 96; + } + else if (ctx->minicpmv_version == 3) { + n_patches = 64; + } + } + + return n_patches; +} + +static std::vector>> get_1d_sincos_pos_embed_from_grid_new(int embed_dim, const std::vector> & pos) { + assert(embed_dim % 2 == 0); + int H = pos.size(); + int W = pos[0].size(); + + std::vector omega(embed_dim / 2); + for (int i = 0; i < embed_dim / 2; ++i) { + omega[i] = 1.0 / pow(10000.0, static_cast(i) / (embed_dim / 2)); + } + + std::vector>> emb(H, std::vector>(W, std::vector(embed_dim))); + for (int h = 0; h < H; ++h) { + for (int w = 0; w < W; ++w) { + for (int d = 0; d < embed_dim / 2; ++d) { + float out_value = pos[h][w] * omega[d]; + emb[h][w][d] = sin(out_value); + emb[h][w][d + embed_dim / 2] = cos(out_value); + } + } + } + + return emb; +} + +static std::vector>> get_2d_sincos_pos_embed_from_grid(int embed_dim, const std::vector>> & grid) { + assert(embed_dim % 2 == 0); + std::vector>> emb_h = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[0]); // (H, W, D/2) + std::vector>> emb_w = get_1d_sincos_pos_embed_from_grid_new(embed_dim / 2, grid[1]); // (H, W, D/2) + + int H = emb_h.size(); + int W = emb_h[0].size(); + std::vector>> emb(H, std::vector>(W, std::vector(embed_dim))); + + for (int h = 0; h < H; ++h) { + for (int w = 0; w < W; ++w) { + for (int d = 0; d < embed_dim / 2; ++d) { + emb[h][w][d] = emb_h[h][w][d]; + emb[h][w][d + embed_dim / 2] = emb_w[h][w][d]; + } + } + } + return emb; +} + +static std::vector> get_2d_sincos_pos_embed(int embed_dim, const std::pair image_size) { + int grid_h_size = image_size.first; + int grid_w_size = image_size.second; + + std::vector grid_h(grid_h_size); + std::vector grid_w(grid_w_size); + + for (int i = 0; i < grid_h_size; ++i) { + grid_h[i] = static_cast(i); + } + for (int i = 0; i < grid_w_size; ++i) { + grid_w[i] = static_cast(i); + } + + std::vector> grid(grid_h_size, std::vector(grid_w_size)); + for (int h = 0; h < grid_h_size; ++h) { + for (int w = 0; w < grid_w_size; ++w) { + grid[h][w] = grid_w[w]; + } + } + std::vector>> grid_2d = {grid, grid}; + for (int h = 0; h < grid_h_size; ++h) { + for (int w = 0; w < grid_w_size; ++w) { + grid_2d[0][h][w] = grid_h[h]; + grid_2d[1][h][w] = grid_w[w]; + } + } + + std::vector>> pos_embed_3d = get_2d_sincos_pos_embed_from_grid(embed_dim, grid_2d); + + int H = image_size.first; + int W = image_size.second; + std::vector> pos_embed_2d(H * W, std::vector(embed_dim)); + for (int h = 0; h < H; ++h) { + for (int w = 0; w < W; ++w) { + pos_embed_2d[w * H + h] = pos_embed_3d[h][w]; + } + } + + return pos_embed_2d; +} + +bool clip_image_encode(struct clip_ctx * ctx, const int n_threads, clip_image_f32 * img, float * vec) { + if (!ctx->has_vision_encoder) { + LOG_ERR("This gguf file seems to have no vision encoder\n"); + return false; + } + + clip_image_f32_batch imgs{}; + imgs.size = 1; + imgs.data = img; + return clip_image_batch_encode(ctx, n_threads, &imgs, vec); +} + +bool 
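+// Positional-embedding note (see get_2d_sincos_pos_embed above): within each
+// axis half of width D, grid position p fills slot i with sin(p * w_i) and
+// slot i + D/2 with cos(p * w_i), where w_i = 1 / 10000^(i / (D/2)), i.e. the
+// standard transformer sin/cos table evaluated over the patch grid.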
clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_image_f32_batch * imgs, float * vec) { + if (!ctx->has_vision_encoder) { + LOG_ERR("This gguf file seems to have no vision encoder\n"); + return false; + } + + int batch_size = imgs->size; + if (ctx->has_llava_projector) { + GGML_ASSERT(batch_size == 1); // TODO: support multiple images + } + if (ctx->has_minicpmv_projector) { + GGML_ASSERT(batch_size == 1); + } + + // build the inference graph + ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true); + ggml_gallocr_alloc_graph(ctx->compute_alloc, gf); + + // set inputs + const auto & model = ctx->vision_model; + const auto & hparams = model.hparams; + + const int image_size = hparams.image_size; + int image_size_width = image_size; + int image_size_height = image_size; + if (ctx->has_minicpmv_projector) { + image_size_width = imgs->data[0].nx; + image_size_height = imgs->data[0].ny; + } + const int patch_size = hparams.patch_size; + const int num_patches = ((image_size_width / patch_size) * (image_size_height / patch_size)); + const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0); + if(ctx->load_image_size==nullptr){ + ctx->load_image_size= clip_image_size_init(); + } + const int pos_w = ctx->load_image_size->width/patch_size; + const int pos_h = ctx->load_image_size->height/patch_size; + + { + struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw"); + float * data = (float *)malloc(ggml_nbytes(inp_raw)); + + for (size_t i = 0; i < imgs->size; i++) { + const int nx = imgs->data[i].nx; + const int ny = imgs->data[i].ny; + if (!ctx->has_minicpmv_projector) { + GGML_ASSERT(nx == image_size && ny == image_size); + } + + const int n = nx * ny; + + for (int b = 0; b < batch_size; b++) { + for (int k = 0; k < 3; k++) { + for (int y = 0; y < ny; y++) { + for (int x = 0; x < nx; x++) { + data[(b * 3 * n) + k * n + y * nx + x] = imgs->data[b].buf[3 * (y * nx + x) + k]; + } + } + } + } + } + + // std::ifstream ifs("/Users/liwei/repo/llama-cpp-experiments/huggingface/pixel_values"); + // ifs.read(reinterpret_cast(data), 384*384*3*sizeof(float)); + // cout << "\t\tGGGET" << endl; + // ifs.close(); + + ggml_backend_tensor_set(inp_raw, data, 0, ggml_nbytes(inp_raw)); + free(data); + } + if (ctx->has_minicpmv_projector) { + { + // inspired from siglip: + // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit + // -> https://huggingface.co/HuggingFaceM4/siglip-so400m-14-980-flash-attn2-navit/blob/d66538faeba44480d0bfaa42145eef26f9423199/modeling_siglip.py#L316 + struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions"); + int* positions_data = (int*)malloc(ggml_nbytes(positions)); + int bucket_coords_h[70]; + int bucket_coords_w[70]; + for (int i = 0; i < pos_h; i++){ + bucket_coords_h[i] = std::floor(70.0*i/pos_h); + } + for (int i = 0; i < pos_w; i++){ + bucket_coords_w[i] = std::floor(70.0*i/pos_w); + } + for (int i = 0, id = 0; i < pos_h; i++){ + for (int j = 0; j < pos_w; j++){ + positions_data[id++] = bucket_coords_h[i]*70 + bucket_coords_w[j]; + } + } + ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions)); + free(positions_data); + } + + { + // inspired from resampler of Qwen-VL: + // -> https://huggingface.co/Qwen/Qwen-VL/tree/main + // -> https://huggingface.co/Qwen/Qwen-VL/blob/0547ed36a86561e2e42fecec8fd0c4f6953e33c4/visual.py#L23 + struct ggml_tensor * pos_embed = ggml_graph_get_tensor(gf, "pos_embed"); + int embed_dim = 4096; + if 
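+        // Layout note for the inp_raw fill above: the u8 image is interleaved
+        // RGBRGB..., while the graph input expects planar floats, so element
+        // (b, k, y, x) lands at data[b*3*n + k*n + y*nx + x] with n = nx*ny,
+        // i.e. one contiguous plane per channel per batch entry.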
(ctx->minicpmv_version == 2) { + embed_dim = 4096; + } + else if (ctx->minicpmv_version == 3) { + embed_dim = 3584; + } + auto pos_embed_t = get_2d_sincos_pos_embed(embed_dim, std::make_pair(pos_w, pos_h)); + + float * pos_embed_data = (float *)malloc(ggml_nbytes(pos_embed)); + for(int i=0;ihas_class_embedding) { + struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings"); + + void* zero_mem = malloc(ggml_nbytes(embeddings)); + memset(zero_mem, 0, ggml_nbytes(embeddings)); + ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings)); + free(zero_mem); + } + } + + { + struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions"); + + int* positions_data = (int*)malloc(ggml_nbytes(positions)); + for (int i = 0; i < num_positions; i++) { + positions_data[i] = i; + } + ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions)); + free(positions_data); + } + + /* + { + struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches"); + int* patches_data = (int*)malloc(ggml_nbytes(patches)); + for (int i = 0; i < num_patches; i++) { + patches_data[i] = i + 1; + } + ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches)); + free(patches_data); + } + */ + + } + + if (ggml_backend_is_cpu(ctx->backend)) { + ggml_backend_cpu_set_n_threads(ctx->backend, n_threads); + } + +#ifdef GGML_USE_METAL + if (ggml_backend_is_metal(ctx->backend)) { + ggml_backend_metal_set_n_cb(ctx->backend, n_threads); + } +#endif + + ggml_backend_graph_compute(ctx->backend, gf); + + // the last node is the embedding tensor + struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 1]; + + // copy the embeddings to the location passed by the user + ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings)); + + return true; +} + +bool clip_model_quantize(const char * fname_inp, const char * fname_out, const int itype) { + ggml_type type = GGML_TYPE_Q4_1; + + assert(itype < GGML_TYPE_COUNT); + type = static_cast(itype); + + auto * ctx_clip = clip_model_load(fname_inp, 2); + + const auto & ctx_src = ctx_clip->ctx_gguf; + const auto & ctx_data = ctx_clip->ctx_data; + + auto * ctx_out = gguf_init_empty(); + gguf_set_kv(ctx_out, ctx_src); + gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); + gguf_set_val_u32(ctx_out, "general.file_type", itype); + + auto fout = std::ofstream(fname_out, std::ios::binary); + + const int n_tensors = gguf_get_n_tensors(ctx_src); + + for (int i = 0; i < n_tensors; ++i) { + const char * name = gguf_get_tensor_name(ctx_src, i); + struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); + gguf_add_tensor(ctx_out, cur); + } + + const size_t meta_size = gguf_get_meta_size(ctx_out); + for (size_t i = 0; i < meta_size; ++i) { + fout.put(0); + } + + // regexes of tensor names to be quantized + const std::vector k_names = { + ".*weight", + }; + + std::vector work(512); + std::vector conv_buf(512); + size_t total_size_org = 0; + size_t total_size_new = 0; + + for (int i = 0; i < n_tensors; ++i) { + const std::string name = gguf_get_tensor_name(ctx_src, i); + struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name.c_str()); + + enum ggml_type new_type; + void * new_data; + size_t new_size; + + bool quantize = false; + for (const auto & s : k_names) { + if (std::regex_match(name, std::regex(s))) { + quantize = true; + break; + } + } + + // quantize only 2D tensors + quantize &= (ggml_n_dims(cur) == 2); + + if (quantize) { + new_type = type; + if (new_type >= GGML_TYPE_Q2_K && name.find("embd") != 
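+            // Size intuition (from ggml's public block layouts, stated as
+            // background): a Q4_1 block packs 32 weights into 16 quantized bytes
+            // plus an f16 scale and f16 min, i.e. 20 bytes per 32 weights (about
+            // 5 bits each), versus 64 bytes for the same 32 weights kept in f16.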
std::string::npos) { + new_type = GGML_TYPE_Q8_0; // ggml_get_rows needs non K type + // LOG_ERR("%s: quantizing %s to %s\n", __func__, name.c_str(), ggml_type_name(new_type)); + } + const size_t n_elms = ggml_nelements(cur); + float * f32_data; + + switch (cur->type) { + case GGML_TYPE_F32: + f32_data = (float *)cur->data; + break; + case GGML_TYPE_F16: + if (conv_buf.size() < n_elms) { + conv_buf.resize(n_elms); + } + for (size_t j = 0; j < n_elms; ++j) { + conv_buf[j] = ggml_fp16_to_fp32(((ggml_fp16_t *)cur->data)[j]); + } + f32_data = (float *)conv_buf.data(); + break; + default: + LOG_ERR("Please use an input file in f32 or f16\n"); + gguf_free(ctx_out); + return false; + } + + if (work.size() < n_elms * 4) { + work.resize(n_elms * 4); + } + new_data = work.data(); + + new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr); + } else { + new_type = cur->type; + new_data = cur->data; + new_size = ggml_nbytes(cur); + } + const size_t orig_size = ggml_nbytes(cur); + total_size_org += orig_size; + total_size_new += new_size; + gguf_set_tensor_type(ctx_out, name.c_str(), new_type); + gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size); + fout.write((const char *)new_data, new_size); + size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size; + for (size_t j = 0; j < pad; ++j) { + fout.put(0); + } + + LOG_INF("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize, + orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); + } + + // go back to beginning of file and write the updated metadata + fout.seekp(0, std::ios::beg); + std::vector meta(meta_size); + gguf_get_meta_data(ctx_out, meta.data()); + fout.write((const char *)meta.data(), meta_size); + + fout.close(); + + clip_free(ctx_clip); + gguf_free(ctx_out); + + { + LOG_INF("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0); + LOG_INF("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0); + } + + return true; +} + +int clip_n_mmproj_embd(const struct clip_ctx * ctx) { + if (ctx->proj_type == PROJECTOR_TYPE_LDP) { + return ctx->vision_model.mm_model_block_1_block_2_1_b->ne[0]; + } + if (ctx->proj_type == PROJECTOR_TYPE_LDPV2) { + return ctx->vision_model.mm_model_peg_0_b->ne[0]; + } + if (ctx->proj_type == PROJECTOR_TYPE_MLP) { + return ctx->vision_model.mm_2_b->ne[0]; + } + if (ctx->proj_type == PROJECTOR_TYPE_MLP_NORM) { + return ctx->vision_model.mm_3_b->ne[0]; + } + if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) { + if (ctx->minicpmv_version == 2) { + return 4096; + } + else if (ctx->minicpmv_version == 3) { + return 3584; + } + } + + std::string proj_type = PROJECTOR_TYPE_NAMES[ctx->proj_type]; + throw std::runtime_error(format("%s: don't support projector with: %s currently\n", __func__, proj_type.c_str())); +} + +int clip_is_minicpmv(const struct clip_ctx * ctx) { + if (ctx->has_minicpmv_projector) { + return ctx->minicpmv_version; + } + return 0; +} diff --git a/examples/omni-vlm/clip.h b/examples/omni-vlm/clip.h new file mode 100644 index 000000000..78588bdf1 --- /dev/null +++ b/examples/omni-vlm/clip.h @@ -0,0 +1,94 @@ +#ifndef CLIP_H +#define CLIP_H + +#include +#include + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define CLIP_API __declspec(dllexport) +# else +# define CLIP_API __declspec(dllimport) +# endif +# else +# define CLIP_API __attribute__ ((visibility ("default"))) +# endif +#else +# define 
CLIP_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +struct clip_ctx; + +struct clip_image_size { + int width; + int height; +}; + +struct clip_image_u8_batch { + struct clip_image_u8 * data; + size_t size; +}; + +struct clip_image_f32_batch { + struct clip_image_f32 * data; + size_t size; +}; + +CLIP_API struct clip_ctx * clip_model_load (const char * fname, int verbosity); +CLIP_API struct clip_ctx * clip_model_load_cpu(const char * fname, int verbosity); + +CLIP_API void clip_free(struct clip_ctx * ctx); + +CLIP_API size_t clip_embd_nbytes(const struct clip_ctx * ctx); + +CLIP_API int32_t clip_image_size (const struct clip_ctx * ctx); +CLIP_API int32_t clip_patch_size (const struct clip_ctx * ctx); +CLIP_API int32_t clip_hidden_size(const struct clip_ctx * ctx); + +// TODO: should be enum, not string +CLIP_API const char * clip_patch_merge_type(const struct clip_ctx * ctx); + +CLIP_API const int32_t * clip_image_grid(const struct clip_ctx * ctx); + +CLIP_API int clip_n_patches (const struct clip_ctx * ctx); +CLIP_API int clip_n_mmproj_embd(const struct clip_ctx * ctx); + +CLIP_API int clip_uhd_num_image_embeds_col(struct clip_ctx * ctx_clip); +CLIP_API void clip_add_load_image_size(struct clip_ctx * ctx_clip, struct clip_image_size * load_image_size); + +CLIP_API struct clip_image_size * clip_image_size_init(); +CLIP_API struct clip_image_u8 * clip_image_u8_init (); +CLIP_API struct clip_image_f32 * clip_image_f32_init(); + +CLIP_API void clip_image_u8_free (struct clip_image_u8 * img); +CLIP_API void clip_image_f32_free(struct clip_image_f32 * img); +CLIP_API void clip_image_u8_batch_free (struct clip_image_u8_batch * batch); +CLIP_API void clip_image_f32_batch_free(struct clip_image_f32_batch * batch); + +CLIP_API bool clip_image_load_from_file(const char * fname, struct clip_image_u8 * img); + +/** interpret bytes as an image file with length bytes_length, and use the result to populate img */ +CLIP_API bool clip_image_load_from_bytes(const unsigned char * bytes, size_t bytes_length, struct clip_image_u8 * img); + +/** preprocess img and store the result in res_imgs, pad_to_square may be overridden to false depending on model configuration */ +CLIP_API bool clip_image_preprocess(struct clip_ctx * ctx, const struct clip_image_u8 * img, struct clip_image_f32_batch * res_imgs ); + +CLIP_API struct ggml_tensor * clip_get_newline_tensor(const struct clip_ctx * ctx); + +CLIP_API bool clip_image_encode (struct clip_ctx * ctx, int n_threads, struct clip_image_f32 * img, float * vec); +CLIP_API bool clip_image_batch_encode(struct clip_ctx * ctx, int n_threads, const struct clip_image_f32_batch * imgs, float * vec); + +CLIP_API bool clip_model_quantize(const char * fname_inp, const char * fname_out, int itype); + +CLIP_API int clip_is_minicpmv(const struct clip_ctx * ctx); + +#ifdef __cplusplus +} +#endif + +#endif // CLIP_H diff --git a/examples/omni-vlm/convert_image_encoder_to_gguf.py b/examples/omni-vlm/convert_image_encoder_to_gguf.py new file mode 100644 index 000000000..9db385400 --- /dev/null +++ b/examples/omni-vlm/convert_image_encoder_to_gguf.py @@ -0,0 +1,208 @@ +import argparse +import os +import json +import re + +import torch +import numpy as np +from gguf import * +# from transformers import CLIPModel, CLIPProcessor, CLIPVisionModel + +VISION = "siglip.vision" + + +def k(raw_key: str, arch: str) -> str: + return raw_key.format(arch=arch) + + +def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_omni_vlm: bool) -> bool: + if name in ( + 
"logit_scale", + "text_model.embeddings.position_ids", + "vision_model.embeddings.position_ids", + ): + return True + + # if name.startswith("vision_model.post_layernorm") or name.startswith("vision_model.head"): + # return True + + if name.startswith("v") and not has_vision: + return True + + if name.startswith("t") and not has_text: + return True + + return False + + +def get_tensor_name(name: str) -> str: + if "projection" in name: + return name + if "multi_modal_projector" in name: + name = name.replace("multi_modal_projector", "mm") + name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1) + name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1) + return name + + return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln") + + +ap = argparse.ArgumentParser() +ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True) +ap.add_argument("-p", "--processor-dir", help="Path to vlm-processor directory cloned from HF Hub", required=True) +ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None) +ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16") +# TODO: whether update this info? +# default_image_mean = [0.48145466, 0.4578275, 0.40821073] +# default_image_std = [0.26862954, 0.26130258, 0.27577711] +default_image_mean = [0.5, 0.5, 0.5] +default_image_std = [0.5, 0.5, 0.5] + +# with proper +args = ap.parse_args() + +if args.use_f32: + print("WARNING: Weights for the convolution op is always saved in f16, as the convolution op in GGML does not support 32-bit kernel weights yet.") + +# output in the same directory as the model if output_dir is None +dir_model = args.model_dir +dir_processor = args.processor_dir + +with open(dir_processor + "/preprocessor_config.json", "r", encoding="utf-8") as f: + hparams = json.load(f) + +# possible data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 +# +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 +if args.use_f32: + ftype = 0 + +has_omni_vlm_projector = True +fname_middle = "mmproj-" +output_dir = args.output_dir if args.output_dir is not None else dir_model +os.makedirs(output_dir, exist_ok=True) + +fname_out = os.path.join(output_dir, f"{fname_middle}omni-vlm-{ftype_str[ftype]}.gguf") +fout = GGUFWriter(path=fname_out, arch="siglip") + +fout.add_bool("siglip.has_omni_vlm_projector", has_omni_vlm_projector) +fout.add_file_type(ftype) +fout.add_name("omni-vlm") +fout.add_description("image encoder for omni-vlm") + + +fout.add_uint32("siglip.vision.image_size", 384) +fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), 1152) +fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), 16) #TODO: to be confirmed +fout.add_uint32("siglip.vision.patch_size", 14) +# block_count = (27 - 1) +block_count = 27 +fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), block_count) + +fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), 4304) +fout.add_uint32("siglip.vision.projection_dim", 4096) + # fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), v_hparams["layer_norm_eps"]) + # if "image_grid_pinpoints" in v_hparams: + # # flatten it + # image_grid_pinpoints = [] + # for 
pinpoint in v_hparams["image_grid_pinpoints"]: + # for p in pinpoint: + # image_grid_pinpoints.append(p) +# fout.add_array("clip.vision.image_grid_pinpoints", image_grid_pinpoints) +# if "image_crop_resolution" in v_hparams: +# fout.add_uint32("clip.vision.image_crop_resolution", v_hparams["image_crop_resolution"]) +# if "image_aspect_ratio" in v_hparams: +# fout.add_string("clip.vision.image_aspect_ratio", v_hparams["image_aspect_ratio"]) +# if "image_split_resolution" in v_hparams: +# fout.add_uint32("clip.vision.image_split_resolution", v_hparams["image_split_resolution"]) +# if "mm_patch_merge_type" in v_hparams: +# fout.add_string("clip.vision.mm_patch_merge_type", v_hparams["mm_patch_merge_type"]) +# if "mm_projector_type" in v_hparams: +# fout.add_string("clip.vision.mm_projector_type", v_hparams["mm_projector_type"]) +# +# +# if processor is not None: +# image_mean = processor.image_processor.image_mean if args.image_mean is None or args.image_mean == default_image_mean else args.image_mean # pyright: ignore[reportAttributeAccessIssue] +# image_std = processor.image_processor.image_std if args.image_std is None or args.image_std == default_image_std else args.image_std # pyright: ignore[reportAttributeAccessIssue] +# else: +# image_mean = args.image_mean if args.image_mean is not None else default_image_mean +# image_std = args.image_std if args.image_std is not None else default_image_std +# fout.add_array("clip.vision.image_mean", image_mean) +# fout.add_array("clip.vision.image_std", image_std) +# +fout.add_array("siglip.vision.image_mean", default_image_mean) +fout.add_array("siglip.vision.image_std", default_image_std) + +# use_gelu = v_hparams["hidden_act"] == "gelu" +# fout.add_bool("clip.use_gelu", use_gelu) + +model = torch.load(os.path.join(dir_model, "omni_vlm.clip"), map_location='cpu') + # model.vision_model.encoder.layers.pop(-1) +projector = torch.load(os.path.join(dir_model, "omni_vlm.projector"), map_location='cpu') +for name, data in projector.items(): + name = get_tensor_name(name) + # pw and dw conv ndim==4 + if data.ndim == 2 or data.ndim == 4: + data = data.squeeze().cpu().numpy().astype(np.float16) + else: + data = data.squeeze().cpu().numpy().astype(np.float32) + + fout.add_tensor(name, data) + +print("Projector tensors added\n") + + +# state_dict = model.state_dict() +state_dict = dict(model) +for name, data in state_dict.items(): + if should_skip_tensor(name, False, True, True): + # we don't need this + print(f"skipping parameter: {name}") + continue + + # if name.startswith(f"vision_model.encoder.layers.{block_count}"): + # continue + + name = get_tensor_name(name) + # data = data.astype(np.float16) + # print(data) + data = data.squeeze().float().numpy() + + n_dims = len(data.shape) + + # ftype == 0 -> float32, ftype == 1 -> float16 + ftype_cur = 0 + if n_dims == 4: + print(f"tensor {name} is always saved in f16") + data = data.astype(np.float16) + ftype_cur = 1 + elif ftype == 1: + if name[-7:] == ".weight" and n_dims == 2: + print(" Converting to float16") + data = data.astype(np.float16) + ftype_cur = 1 + else: + print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 + else: + if data.dtype != np.float32: + print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 + + print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}") + fout.add_tensor(name, data) + + +fout.write_header_to_file() +fout.write_kv_data_to_file() +fout.write_tensors_to_file() +fout.close() + +print("Done. 
Output file: " + fname_out) diff --git a/examples/omni-vlm/omni-vlm-cli.cpp b/examples/omni-vlm/omni-vlm-cli.cpp new file mode 100644 index 000000000..68e833182 --- /dev/null +++ b/examples/omni-vlm/omni-vlm-cli.cpp @@ -0,0 +1,293 @@ +// #include "arg.h" +#include "base64.hpp" +#include "log.h" +#include "common.h" +#include "sampling.h" +#include "clip.h" +#include "omni-vlm.h" +#include "llama.h" +#include "ggml.h" + +#include +#include +#include +#include + +static bool eval_tokens(struct llama_context * ctx_llama, std::vector tokens, int n_batch, int * n_past) { + int N = (int) tokens.size(); + for (int i = 0; i < N; i += n_batch) { + int n_eval = (int) tokens.size() - i; + if (n_eval > n_batch) { + n_eval = n_batch; + } + if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) { + LOG_TEE("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past); + return false; + } + *n_past += n_eval; + } + return true; +} + +static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) { + std::vector tokens; + tokens.push_back(id); + return eval_tokens(ctx_llama, tokens, 1, n_past); +} + +static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){ + std::string str2 = str; + std::vector embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true); + eval_tokens(ctx_llama, embd_inp, n_batch, n_past); + return true; +} + +static const char * sample(struct llama_sampling_context * ctx_sampling, + struct llama_context * ctx_llama, + int * n_past) { + const llama_token id = llama_sampling_sample(ctx_sampling, ctx_llama, NULL); + llama_sampling_accept(ctx_sampling, ctx_llama, id, true); + static std::string ret; + if (llama_token_is_eog(llama_get_model(ctx_llama), id)) { + ret = ""; + } else { + ret = llama_token_to_piece(ctx_llama, id); + } + eval_id(ctx_llama, id, n_past); + return ret.c_str(); +} + +static const std::string IMG_PAD = "<|image_pad|>"; + +static void find_image_tag_in_prompt(const std::string& prompt, size_t& idx) { + // begin_out = prompt.find(IMG_BASE64_TAG_BEGIN); + // end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out); + idx = prompt.find(IMG_PAD); +} + +static bool prompt_contains_image(const std::string& prompt) { + size_t begin; + find_image_tag_in_prompt(prompt, begin); + return (begin != std::string::npos); +} + +// replaces the base64 image tag in the prompt with `replacement` +static omni_image_embed * omnivlm_image_embed_make_with_prompt(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) { + size_t idx; + find_image_tag_in_prompt(prompt, idx); + if (idx == std::string::npos) { + LOG_TEE("%s: invalid base64 image tag. 
must be %s\n", __func__, IMG_PAD.c_str()); + return NULL; + } + + auto base64_str = prompt.substr(idx, IMG_PAD.size()); + + auto required_bytes = base64::required_encode_size(base64_str.size()); + auto img_bytes = std::vector(required_bytes); + base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin()); + + auto embed = omnivlm_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size()); + if (!embed) { + LOG_TEE("%s: could not load image from base64 string.\n", __func__); + return NULL; + } + + return embed; +} + +static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") { + size_t begin; + find_image_tag_in_prompt(prompt, begin); + if (begin == std::string::npos) { + return prompt; + } + auto pre = prompt.substr(0, begin); + auto post = prompt.substr(begin + IMG_PAD.size()); + return pre + replacement + post; +} + +struct omnivlm_context { + struct clip_ctx * ctx_clip = NULL; + struct llama_context * ctx_llama = NULL; + struct llama_model * model = NULL; +}; + +static void print_usage(int argc, char ** argv, const gpt_params & params) { + gpt_params_print_usage(argc, argv, params); + + LOG_TEE("\n example usage:\n"); + LOG_TEE("\n %s -m --mmproj --image --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); + LOG_TEE("\n note: a lower temperature value like 0.1 is recommended for better quality.\n"); +} + +static struct omni_image_embed * load_image(omnivlm_context * ctx_omnivlm, gpt_params * params, const std::string & fname) { + + // load and preprocess the image + omni_image_embed * embed = NULL; + embed = omnivlm_image_embed_make_with_filename(ctx_omnivlm->ctx_clip, params->n_threads, fname.c_str()); + if (!embed) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str()); + return NULL; + } + + return embed; +} + +static void process_prompt(struct omnivlm_context * ctx_omnivlm, struct omni_image_embed * image_embed, gpt_params * params, const std::string & prompt) { + int n_past = 0; + + const int max_tgt_len = params->n_predict < 0 ? 
256 : params->n_predict; + + std::string system_prompt, user_prompt; + size_t image_pos = prompt.find("<|image_pad|>"); + // new templating mode: Provide the full prompt including system message and use as a placeholder for the image + system_prompt = prompt.substr(0, image_pos); + user_prompt = prompt.substr(image_pos + std::string("<|image_pad|>").length()); + if (params->verbose_prompt) { + auto tmp = ::llama_tokenize(ctx_omnivlm->ctx_llama, system_prompt, true, true); + for (int i = 0; i < (int) tmp.size(); i++) { + LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_omnivlm->ctx_llama, tmp[i]).c_str()); + } + } + LOG_TEE("user_prompt: %s\n", user_prompt.c_str()); + if (params->verbose_prompt) { + auto tmp = ::llama_tokenize(ctx_omnivlm->ctx_llama, user_prompt, true, true); + for (int i = 0; i < (int) tmp.size(); i++) { + LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_omnivlm->ctx_llama, tmp[i]).c_str()); + } + } + + eval_string(ctx_omnivlm->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true); + omnivlm_eval_image_embed(ctx_omnivlm->ctx_llama, image_embed, params->n_batch, &n_past); + eval_string(ctx_omnivlm->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false); + + // generate the response + + LOG("\n"); + + struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams); + if (!ctx_sampling) { + LOG_TEE("%s: failed to initialize sampling subsystem\n", __func__); + exit(1); + } + + std::string response = ""; + for (int i = 0; i < max_tgt_len; i++) { + const char * tmp = sample(ctx_sampling, ctx_omnivlm->ctx_llama, &n_past); + response += tmp; + if (strcmp(tmp, "<|im_end|>") == 0) break; + if (strcmp(tmp, "") == 0) break; + // if (strstr(tmp, "###")) break; // Yi-VL behavior + printf("%s", tmp); + // if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works) + // if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6 + // if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6 + + fflush(stdout); + } + + llama_sampling_free(ctx_sampling); + printf("\n"); +} + +static struct llama_model * omnivlm_init(gpt_params * params) { + llama_backend_init(); + llama_numa_init(params->numa); + + llama_model_params model_params = llama_model_params_from_gpt_params(*params); + + llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params); + if (model == NULL) { + LOG_TEE("%s: unable to load model\n" , __func__); + return NULL; + } + return model; +} + +static struct omnivlm_context * omnivlm_init_context(gpt_params * params, llama_model * model) { + const char * clip_path = params->mmproj.c_str(); + + auto prompt = params->prompt; + if (prompt.empty()) { + prompt = "describe the image in detail."; + } + + auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 10); + + + llama_context_params ctx_params = llama_context_params_from_gpt_params(*params); + ctx_params.n_ctx = params->n_ctx < 2048 ? 
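+    // (The 2048 floor is an assumption inherited from the llava examples: the
+    // image alone contributes several hundred positions, e.g. a 384px encoder
+    // with 14px patches yields 27*27 = 729 patch embeddings, before any
+    // chat-template or response tokens are counted.)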
2048 : params->n_ctx; // we need a longer context size to process image embeddings
+
+    llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);
+
+    if (ctx_llama == NULL) {
+        LOG_TEE("%s: failed to create the llama_context\n" , __func__);
+        return NULL;
+    }
+
+    auto * ctx_omnivlm = (struct omnivlm_context *)malloc(sizeof(omnivlm_context));
+
+    ctx_omnivlm->ctx_llama = ctx_llama;
+    ctx_omnivlm->ctx_clip = ctx_clip;
+    ctx_omnivlm->model = model;
+    return ctx_omnivlm;
+}
+
+static void omnivlm_free(struct omnivlm_context * ctx_omnivlm) {
+    if (ctx_omnivlm->ctx_clip) {
+        clip_free(ctx_omnivlm->ctx_clip);
+        ctx_omnivlm->ctx_clip = NULL;
+    }
+
+    llama_free(ctx_omnivlm->ctx_llama);
+    llama_free_model(ctx_omnivlm->model);
+    llama_backend_free();
+}
+
+int main(int argc, char ** argv) {
+    ggml_time_init();
+
+    gpt_params params;
+
+    // if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
+    //     return 1;
+    // }
+    if (!gpt_params_parse(argc, argv, params)) {
+        print_usage(argc, argv, params);
+        return 1;
+    }
+
+    if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
+        print_usage(argc, argv, {});
+        return 1;
+    }
+
+    params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\nDescribe this image for me\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>";
+
+    auto * model = omnivlm_init(&params);
+    if (model == NULL) {
+        fprintf(stderr, "%s: error: failed to init omnivlm model\n", __func__);
+        return 1;
+    }
+
+
+    auto * ctx_omnivlm = omnivlm_init_context(&params, model);
+    for (auto & image : params.image) {
+        auto * image_embed = load_image(ctx_omnivlm, &params, image);
+        if (!image_embed) {
+            LOG_TEE("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str());
+            return 1;
+        }
+        // process the prompt
+        process_prompt(ctx_omnivlm, image_embed, &params, params.prompt);
+
+        llama_print_timings(ctx_omnivlm->ctx_llama);
+        omnivlm_image_embed_free(image_embed);
+    }
+    ctx_omnivlm->model = NULL;
+    omnivlm_free(ctx_omnivlm);
+
+    llama_free_model(model);
+
+    return 0;
+}
diff --git a/examples/omni-vlm/omni-vlm-wrapper-cli.cpp b/examples/omni-vlm/omni-vlm-wrapper-cli.cpp
new file mode 100644
index 000000000..731b7791e
--- /dev/null
+++ b/examples/omni-vlm/omni-vlm-wrapper-cli.cpp
@@ -0,0 +1,16 @@
+// WARNING: this .cpp file is only for debugging. do not use directly.
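+//
+// Usage sketch (paths below are placeholders, not shipped files): the wrapper
+// keeps global state, so a minimal caller is simply
+//
+//     omnivlm_init("path/to/llm.gguf", "path/to/mmproj.gguf");
+//     omnivlm_inference("Describe this image for me", "path/to/image.png");
+//     omnivlm_free();
+//
+// omnivlm_init() loads both models once, each omnivlm_inference() call embeds
+// the image and streams the answer to stdout, and omnivlm_free() releases the
+// llama and clip contexts.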
+#include "omni-vlm-wrapper.h" + +int main(int argc, char ** argv) { + const char* llm_model = ""; + const char* mmproj_model = ""; + const char* image_path = ""; + const char* prompt = ""; + + omnivlm_init(llm_model, mmproj_model); + omnivlm_inference(prompt, image_path); + omnivlm_inference(prompt, image_path); + omnivlm_free(); + + return 0; +} diff --git a/examples/omni-vlm/omni-vlm-wrapper.cpp b/examples/omni-vlm/omni-vlm-wrapper.cpp new file mode 100644 index 000000000..81178205e --- /dev/null +++ b/examples/omni-vlm/omni-vlm-wrapper.cpp @@ -0,0 +1,247 @@ +#include "base64.hpp" +#include "log.h" +#include "common.h" +#include "sampling.h" +#include "clip.h" +#include "omni-vlm.h" +#include "llama.h" +#include "ggml.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "omni-vlm-wrapper.h" + + +struct omnivlm_context { + struct clip_ctx * ctx_clip = NULL; + struct llama_context * ctx_llama = NULL; + struct llama_model * model = NULL; +}; + +static struct gpt_params params; +static struct llama_model* model; +static struct omnivlm_context* ctx_omnivlm; + +static struct omni_image_embed * load_image(omnivlm_context * ctx_omnivlm, gpt_params * params, const std::string & fname) { + + // load and preprocess the image + omni_image_embed * embed = NULL; + embed = omnivlm_image_embed_make_with_filename(ctx_omnivlm->ctx_clip, params->n_threads, fname.c_str()); + if (!embed) { + fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str()); + return NULL; + } + + return embed; +} + +static struct llama_model * omnivlm_init(gpt_params * params) { + llama_backend_init(); + llama_numa_init(params->numa); + + llama_model_params model_params = llama_model_params_from_gpt_params(*params); + + llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params); + if (model == NULL) { + LOG_TEE("%s: unable to load model\n" , __func__); + return NULL; + } + return model; +} + +static struct omnivlm_context * omnivlm_init_context(gpt_params * params, llama_model * model) { + const char * clip_path = params->mmproj.c_str(); + + auto prompt = params->prompt; + if (prompt.empty()) { + prompt = "describe the image in detail."; + } + + auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 10); + + + llama_context_params ctx_params = llama_context_params_from_gpt_params(*params); + ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings + + llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); + + if (ctx_llama == NULL) { + LOG_TEE("%s: failed to create the llama_context\n" , __func__); + return NULL; + } + + ctx_omnivlm = (struct omnivlm_context *)malloc(sizeof(omnivlm_context)); + + ctx_omnivlm->ctx_llama = ctx_llama; + ctx_omnivlm->ctx_clip = ctx_clip; + ctx_omnivlm->model = model; + return ctx_omnivlm; +} + +static bool eval_tokens(struct llama_context * ctx_llama, std::vector tokens, int n_batch, int * n_past) { + int N = (int) tokens.size(); + for (int i = 0; i < N; i += n_batch) { + int n_eval = (int) tokens.size() - i; + if (n_eval > n_batch) { + n_eval = n_batch; + } + if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) { + LOG_TEE("%s : failed to eval. 
+static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
+    int N = (int) tokens.size();
+    for (int i = 0; i < N; i += n_batch) {
+        int n_eval = (int) tokens.size() - i;
+        if (n_eval > n_batch) {
+            n_eval = n_batch;
+        }
+        if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
+            LOG_TEE("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
+            return false;
+        }
+        *n_past += n_eval;
+    }
+    return true;
+}
+
+static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
+    std::vector<llama_token> tokens;
+    tokens.push_back(id);
+    return eval_tokens(ctx_llama, tokens, 1, n_past);
+}
+
+static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
+    std::string str2 = str;
+    std::vector<llama_token> embd_inp = ::llama_tokenize(ctx_llama, str2, add_bos, true);
+    eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
+    return true;
+}
+
+static const char * sample(struct llama_sampling_context * ctx_sampling,
+                           struct llama_context * ctx_llama,
+                           int * n_past) {
+    const llama_token id = llama_sampling_sample(ctx_sampling, ctx_llama, NULL);
+    llama_sampling_accept(ctx_sampling, ctx_llama, id, true);
+    static std::string ret;
+    if (llama_token_is_eog(llama_get_model(ctx_llama), id)) {
+        ret = "</s>";
+    } else {
+        ret = llama_token_to_piece(ctx_llama, id);
+    }
+    eval_id(ctx_llama, id, n_past);
+    return ret.c_str();
+}
+
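+// Builds the chat-template prompt, splits it at the <|image_pad|> marker, and
+// evaluates it in order: system part -> image embedding -> user part, then
+// samples up to max_tgt_len response tokens.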
"<|im_start|>")) break; // Yi-34B llava-1.6 + // if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6 + + fflush(stdout); + } + + llama_sampling_free(ctx_sampling); + printf("\n"); +} + +static void omnivlm_free(struct omnivlm_context * ctx_omnivlm) { + if (ctx_omnivlm->ctx_clip) { + clip_free(ctx_omnivlm->ctx_clip); + ctx_omnivlm->ctx_clip = NULL; + } + + llama_free(ctx_omnivlm->ctx_llama); + llama_free_model(ctx_omnivlm->model); + llama_backend_free(); +} + +static void print_usage(int argc, char ** argv, const gpt_params & params) { + gpt_params_print_usage(argc, argv, params); + + LOG_TEE("\n example usage:\n"); + LOG_TEE("\n %s -m --mmproj --image --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]); + LOG_TEE("\n note: a lower temperature value like 0.1 is recommended for better quality.\n"); +} + +// inference interface definition +void omnivlm_init(const char* llm_model_path, const char* projector_model_path) { + const char* argv = "hello-omni-vlm-wrapper-cli"; + char* nc_argv = const_cast(argv); + if (!gpt_params_parse(1, &nc_argv, params)) { + print_usage(1, &nc_argv, {}); + throw std::runtime_error("init params error."); + } + params.model = llm_model_path; + params.mmproj = projector_model_path; + model = omnivlm_init(¶ms); + if (model == nullptr) { + fprintf(stderr, "%s: error: failed to init omnivlm model\n", __func__); + throw std::runtime_error("Failed to init omnivlm model"); + } + ctx_omnivlm = omnivlm_init_context(¶ms, model); +} + +void omnivlm_inference(const char *prompt, const char *imag_path) { + std::string image = imag_path; + params.prompt = prompt; + auto * image_embed = load_image(ctx_omnivlm, ¶ms, image); + if (!image_embed) { + LOG_TEE("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str()); + throw std::runtime_error("failed to load image " + image); + } + // process the prompt + process_prompt(ctx_omnivlm, image_embed, ¶ms, params.prompt); + + // llama_perf_print(ctx_omnivlm->ctx_llama, LLAMA_PERF_TYPE_CONTEXT); + omnivlm_image_embed_free(image_embed); +} + +void omnivlm_free() { + ctx_omnivlm->model = NULL; + omnivlm_free(ctx_omnivlm); + llama_free_model(model); +} diff --git a/examples/omni-vlm/omni-vlm-wrapper.h b/examples/omni-vlm/omni-vlm-wrapper.h new file mode 100644 index 000000000..4ab2c234c --- /dev/null +++ b/examples/omni-vlm/omni-vlm-wrapper.h @@ -0,0 +1,33 @@ + +#ifndef OMNIVLMWRAPPER_H +#define OMNIVLMWRAPPER_H + +#ifdef LLAMA_SHARED +# if defined(_WIN32) && !defined(__MINGW32__) +# ifdef LLAMA_BUILD +# define OMNIVLM_API __declspec(dllexport) +# else +# define OMNIVLM_API __declspec(dllimport) +# endif +# else +# define OMNIVLM_API __attribute__ ((visibility ("default"))) +# endif +#else +# define OMNIVLM_API +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +OMNIVLM_API void omnivlm_init(const char* llm_model_path, const char* projector_model_path); + +OMNIVLM_API void omnivlm_inference(const char* prompt, const char* imag_path); + +OMNIVLM_API void omnivlm_free(); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/examples/omni-vlm/omni-vlm.cpp b/examples/omni-vlm/omni-vlm.cpp new file mode 100644 index 000000000..539b300bf --- /dev/null +++ b/examples/omni-vlm/omni-vlm.cpp @@ -0,0 +1,539 @@ +#include "clip.h" +#include "omni-vlm.h" + +#include "llama.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define die(msg) do { fputs("error: " msg "\n", stderr); exit(1); } while (0) +#define die_fmt(fmt, ...) 
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/examples/omni-vlm/omni-vlm.cpp b/examples/omni-vlm/omni-vlm.cpp
new file mode 100644
index 000000000..539b300bf
--- /dev/null
+++ b/examples/omni-vlm/omni-vlm.cpp
@@ -0,0 +1,539 @@
+#include "clip.h"
+#include "omni-vlm.h"
+
+#include "llama.h"
+
+#include <algorithm>
+#include <cerrno>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <utility>
+#include <vector>
+
+#define die(msg)          do { fputs("error: " msg "\n", stderr);                exit(1); } while (0)
+#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)
+
+// #define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
+// #define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
+// #define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
+// #define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
+
+// RGB uint8 image
+struct clip_image_u8 {
+    int nx;
+    int ny;
+
+    std::vector<uint8_t> buf;
+};
+
+// RGB float32 image (NHWC)
+// Memory layout: RGBRGBRGB...
+struct clip_image_f32 {
+    int nx;
+    int ny;
+
+    std::vector<float> buf;
+};
+
+struct clip_image_grid_shape {
+    int first;
+    int second;
+};
+
+/**
+ * Selects the best resolution from a list of possible resolutions based on the original size.
+ *
+ * @param original_size The original size of the image in the format (width, height).
+ * @param possible_resolutions A list of possible resolutions in the format [(width1, height1), (width2, height2), ...].
+ * @return The best fit resolution in the format (width, height).
+ */
+static std::pair<int, int> select_best_resolution(const std::pair<int, int>& original_size, const std::vector<std::pair<int, int>>& possible_resolutions) {
+    int original_width = original_size.first;
+    int original_height = original_size.second;
+
+    std::pair<int, int> best_fit;
+    int max_effective_resolution = 0;
+    int min_wasted_resolution = std::numeric_limits<int>::max();
+
+    for (const auto& resolution : possible_resolutions) {
+        int width = resolution.first;
+        int height = resolution.second;
+        float scale = std::min(static_cast<float>(width) / original_width, static_cast<float>(height) / original_height);
+        int downscaled_width = static_cast<int>(original_width * scale);
+        int downscaled_height = static_cast<int>(original_height * scale);
+        int effective_resolution = std::min(downscaled_width * downscaled_height, original_width * original_height);
+        int wasted_resolution = (width * height) - effective_resolution;
+        // LOG_DBG("resolution: %d %d, scale: %f, downscaled: %d %d, effective: %d, wasted: %d\n", width, height, scale, downscaled_width, downscaled_height, effective_resolution, wasted_resolution);
+        if (effective_resolution > max_effective_resolution || (effective_resolution == max_effective_resolution && wasted_resolution < min_wasted_resolution)) {
+            max_effective_resolution = effective_resolution;
+            min_wasted_resolution = wasted_resolution;
+            best_fit = resolution;
+        }
+    }
+
+    return best_fit;
+}
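+
+// For example, a 640x480 original with candidates {336x672, 672x336, 672x672}
+// selects 672x672: it is the only candidate whose scaled fit preserves the
+// full 640x480 effective resolution.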
+
+/**
+ * @brief Get the anyres image grid shape object
+ *
+ * @param image_size
+ * @param grid_pinpoints
+ * @param image_patch_size
+ * @return <int, int>
+ */
+static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair<int, int> & image_size, const std::vector<std::pair<int, int>> & grid_pinpoints, int image_patch_size) {
+    /**
+        Conversion from gguf flat array to vector:
+        std::vector<std::pair<int, int>> possible_resolutions;
+        for (int i = 0; i < 32 && params.image_grid_pinpoints[i] != 0; i+=2) {
+            possible_resolutions.push_back({params.image_grid_pinpoints[i], params.image_grid_pinpoints[i+1]});
+        }
+     */
+    auto best_resolution = select_best_resolution(image_size, grid_pinpoints);
+    return {best_resolution.first / image_patch_size, best_resolution.second / image_patch_size};
+}
+
+// Take the image segments in a grid configuration and return the embeddings and the number of embeddings into preallocated memory (image_embd_out)
+static bool clip_omnivlm_handle_patches(clip_ctx * ctx_clip, std::vector<float *> & image_embd_v, struct clip_image_grid_shape grid_shape, float * image_embd_out, int * n_img_pos_out) {
+    struct {
+        struct ggml_context * ctx;
+    } model;
+
+    const int32_t image_size = clip_image_size(ctx_clip);
+    const int32_t patch_size = clip_patch_size(ctx_clip);
+
+    int32_t num_patches_per_side = image_size / patch_size; // 336 / 14 = 24 - used for embedding-patching boxes (24*24 = 576 patches)
+
+    int num_patches_width = grid_shape.first;   // grid 1-4
+    int num_patches_height = grid_shape.second; // grid 1-4
+
+    const size_t num_images = num_patches_width * num_patches_height + 1;
+
+    // TODO: the context size is only roughly estimated here, not computed exactly - it's only tens of MB
+    size_t ctx_size = 0;
+
+    {
+        ctx_size += clip_embd_nbytes(ctx_clip) * num_images * 8; // image_features
+        ctx_size += 1024*1024 * ggml_type_size(GGML_TYPE_F32);
+    }
+
+    struct ggml_init_params params {
+        /*.mem_size   =*/ ctx_size,
+        /*.mem_buffer =*/ NULL,
+        /*.no_alloc   =*/ false, // NOTE: this should be false when using the legacy API
+    };
+
+    // Python reference code for full unpad:
+    /*
+        base_image_feature = image_feature[0]
+        image_feature = image_feature[1:]
+        image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
+        image_feature = image_feature.flatten(1, 2).flatten(2, 3)
+        image_feature = unpad_image(image_feature, image_sizes[image_idx])
+        image_feature = torch.cat((
+            image_feature,
+            self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1)
+        ), dim=-1)
+        image_feature = image_feature.flatten(1, 2).transpose(0, 1)
+        image_feature = torch.cat((base_image_feature, image_feature), dim=0)
+    */
+    // We now have two options: unpad or no unpad. Unpad removes tokens for faster llm eval.
+    // In terms of result quality it appears to make no difference, so we'll start with the easier approach given 5D tensors are not supported in ggml yet.
+    // Without unpad we have to split the sub-image embeddings into patches of 24 features each and permute them.
+    // Once all images are processed, the base_image_features are prepended without any changes.
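+    // In short: the no-unpad path below keeps every sub-image patch embedding
+    // and only reorders them (via the view/permute/view sequence below) so
+    // that patch rows of horizontally adjacent sub-images become contiguous.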
+
+    // Pytorch reference simplified, modified for ggml compatibility - confirmed identical output in python (for a 2x2 grid image (676x676 scaling))
+    /*
+        image_feature = image_feature.view(2, 2, 24, 24, 4096)
+        image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous()
+        image_feature = image_feature.view(2, 24, 2, 24, 4096)
+        image_feature = image_feature.flatten(0, 3)
+
+        // Reshape to 4D tensor by merging the last two dimensions
+        image_feature = image_feature.view(2, 2, 24, 24*4096)
+        image_feature = image_feature.permute(0, 2, 1, 3).contiguous()
+        image_feature = image_feature.view(-1, 4096)
+    */
+
+    model.ctx = ggml_init(params);
+
+    struct ggml_tensor * image_features = ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), clip_n_patches(ctx_clip), num_images - 1); // example: 4096 x 576 x 4
+    // ggml_tensor_printf(image_features,"image_features",__LINE__,false,false);
+    // fill it with the image embeddings, ignoring the base
+    for (size_t i = 1; i < num_images; i++) {
+        size_t offset = (i-1) * clip_embd_nbytes(ctx_clip);
+        memcpy((uint8_t *)(image_features->data) + offset, image_embd_v[i], clip_embd_nbytes(ctx_clip));
+    }
+
+    struct ggml_cgraph * gf = ggml_new_graph(model.ctx);
+    size_t size_ele = ggml_type_size(GGML_TYPE_F32);
+
+    struct ggml_tensor *image_features_patchview = ggml_view_4d(model.ctx, image_features,
+                                                                num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
+                                                                num_patches_per_side,
+                                                                num_patches_width,
+                                                                num_patches_height,
+                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip),
+                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side,
+                                                                size_ele * num_patches_per_side * clip_n_mmproj_embd(ctx_clip) * num_patches_per_side * num_patches_width, 0);
+    // ggml_tensor_printf(image_features_patchview,"image_features_patchview",__LINE__,false,false);
+    struct ggml_tensor *permuted_cont = ggml_cont(model.ctx, ggml_permute(model.ctx, image_features_patchview, 0, 2, 1, 3));
+    /**
+     At the end of each row we have to add the row_end embeddings, which are the same as the newline embeddings
+     image_feature = torch.cat((
+        image_feature,
+        self.model.image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device)
+    ), dim=-1)
+     *
+     */
+
+    // ggml_tensor_printf(permuted_cont,"permuted_cont",__LINE__,false,false);
+    struct ggml_tensor *flatten = ggml_view_2d(model.ctx, permuted_cont, clip_n_mmproj_embd(ctx_clip), num_patches_height * num_patches_width * num_patches_per_side * num_patches_per_side, size_ele * clip_n_mmproj_embd(ctx_clip), 0);
+    // ggml_tensor_printf(flatten,"flatten",__LINE__,false,false);
+    ggml_build_forward_expand(gf, flatten);
+    ggml_graph_compute_with_ctx(model.ctx, gf, 1);
+    struct ggml_tensor* result = gf->nodes[gf->n_nodes - 1];
+
+    memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as global context
+    // append without newline tokens (default behavior in llava_arch when not using unpad):
+    memcpy(image_embd_out + clip_n_patches(ctx_clip) * clip_n_mmproj_embd(ctx_clip), (float*)result->data, clip_embd_nbytes(ctx_clip) * (num_images-1)); // grid patches
+    *n_img_pos_out = static_cast<int>(result->ne[1]+clip_n_patches(ctx_clip));
+
+    // Debug: Test single segments
+    // Current findings: sending base image, sending a segment embedding all works similar to python
+    // However, permuted embeddings do not work yet (stride issue?)
+    // memcpy(image_embd_out, image_embd_v[0], clip_embd_nbytes(ctx_clip)); // main image as context
+    // memcpy(image_embd_out, (float*)prepared_cont->data, clip_embd_nbytes(ctx_clip)); // main image as context
+    // *n_img_pos_out=576;
+
+    ggml_free(model.ctx);
+    return true;
+}
+
+static clip_image_f32 * only_v2_5_reshape_by_patch(clip_image_f32 * image, int patch_size) {
+    int width = image->nx;
+    int height = image->ny;
+    int num_patches = (height / patch_size) * (width / patch_size);
+    clip_image_f32 * patch = clip_image_f32_init();
+    patch->nx = patch_size * num_patches;
+    patch->ny = patch_size;
+    patch->buf.resize(3 * patch->nx * patch->ny);
+
+    int patch_index = 0;
+
+    for (int i = 0; i < height; i += patch_size) {
+        for (int j = 0; j < width; j += patch_size) {
+            for (int pi = 0; pi < patch_size; ++pi) {
+                for (int pj = 0; pj < patch_size; ++pj) {
+                    int input_index = ((i + pi) * width + (j + pj)) * 3;
+                    int output_index = (pi * patch_size * num_patches + patch_index * patch_size + pj) * 3;
+                    patch->buf[output_index] = image->buf[input_index];
+                    patch->buf[output_index+1] = image->buf[input_index+1];
+                    patch->buf[output_index+2] = image->buf[input_index+2];
+                }
+            }
+            patch_index++;
+        }
+    }
+    return patch;
+}
+
+static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float * image_embd, int * n_img_pos) {
+    // std::vector<clip_image_f32 *> img_res_v; // format VectN x H x W x RGB (N x 336 x 336 x 3), so interleaved RGB - different to the python implementation which is N x 3 x 336 x 336
+    clip_image_f32_batch img_res_v;
+    img_res_v.size = 0;
+    img_res_v.data = nullptr;
+    if (!clip_image_preprocess(ctx_clip, img, &img_res_v)) {
+        LOG_TEE("%s: unable to preprocess image\n", __func__);
+        delete[] img_res_v.data;
+        return false;
+    }
+
+    const int64_t t_img_enc_start_us = ggml_time_us();
+
+    const char * mm_patch_merge_type = clip_patch_merge_type(ctx_clip);
+
+    *n_img_pos = clip_n_patches(ctx_clip);
+    bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd);
+    // fail early instead of silently returning garbage embeddings
+    if (!encoded) {
+        LOG_TEE("%s: unable to encode image\n", __func__);
+        delete[] img_res_v.data;
+        return false;
+    }
+    // cout << "\t\t A NICE START" << endl;
+    // cout << "\t\t" << *n_img_pos << endl;
+    /*
+    if (clip_is_minicpmv(ctx_clip)) {
+        std::vector<float *> image_embd_v;
+        image_embd_v.resize(img_res_v.size);
+        struct clip_image_size * load_image_size = clip_image_size_init();
+        for (size_t i = 0; i < img_res_v.size; i++) {
+            const int64_t t_img_enc_step_start_us = ggml_time_us();
+            image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip));
+            int patch_size=14;
+            load_image_size->width = img_res_v.data[i].nx;
+            load_image_size->height = img_res_v.data[i].ny;
+            clip_add_load_image_size(ctx_clip, load_image_size);
+            bool encoded = false;
+            int has_minicpmv_projector = clip_is_minicpmv(ctx_clip);
+            if (has_minicpmv_projector == 2) {
+                encoded = clip_image_encode(ctx_clip, n_threads, only_v2_5_reshape_by_patch(&img_res_v.data[i], patch_size), image_embd_v[i]);
+            }
+            else if (has_minicpmv_projector == 3) {
+                encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]);
+            }
+            if (!encoded) {
+                LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) img_res_v.size);
+                return false;
+            }
+            const int64_t t_img_enc_steop_batch_us = ggml_time_us();
+            LOG_INF("%s: step %d of %d encoded in %8.2f ms\n", __func__, (int)i+1, (int)img_res_v.size, (t_img_enc_steop_batch_us - t_img_enc_step_start_us) / 1000.0);
+        }
+        const int64_t t_img_enc_batch_us = ggml_time_us();
+        LOG_INF("%s: all %d segments encoded in %8.2f ms\n", __func__, (int)img_res_v.size, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);
+
+        int n_img_pos_out = 0;
+        for (size_t i = 0; i < image_embd_v.size(); i++) {
+            std::memcpy(image_embd + n_img_pos_out * clip_n_mmproj_embd(ctx_clip), image_embd_v[i], clip_embd_nbytes(ctx_clip));
+            n_img_pos_out += clip_n_patches(ctx_clip);
+        }
+        *n_img_pos = n_img_pos_out;
+        for (size_t i = 0; i < image_embd_v.size(); i++) {
+            free(image_embd_v[i]);
+        }
+        image_embd_v.clear();
+        load_image_size->width = img->nx;
+        load_image_size->height = img->ny;
+        clip_add_load_image_size(ctx_clip, load_image_size);
+        LOG_INF("%s: load_image_size %d %d\n", __func__, load_image_size->width, load_image_size->height);
+    }
+    else if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) {
+        // flat / default llava-1.5 type embedding
+        *n_img_pos = clip_n_patches(ctx_clip);
+        bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd); // image_embd shape is 576 x 4096
+        delete[] img_res_v.data;
+        if (!encoded) {
+            LOG_ERR("Unable to encode image\n");
+
+            return false;
+        }
+    }
+    else {
+        // spatial_unpad llava-1.6 type embedding
+        // TODO: CLIP needs batching support - in HF the llm projection is separate after encoding, which might be a solution to quickly get batching working
+        std::vector<float *> image_embd_v;
+        image_embd_v.resize(img_res_v.size);
+        for (size_t i = 0; i < img_res_v.size; i++) {
+            image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); // 576 patches * 4096 embeddings * 4 bytes = 9437184
+            const bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]); // image data is in 3x336x336 format and will be converted to 336x336x3 inside
+            if (!encoded) {
+                LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) img_res_v.size);
+                return false;
+            }
+        }
+        const int64_t t_img_enc_batch_us = ggml_time_us();
+        LOG_INF("%s: %d segments encoded in %8.2f ms\n", __func__, (int)img_res_v.size, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);
+
+        const int32_t * image_grid = clip_image_grid(ctx_clip);
+
+        std::vector<std::pair<int, int>> grid_pinpoints;
+        for (int i = 0; i < 32 && image_grid[i] != 0; i += 2) {
+            grid_pinpoints.push_back({image_grid[i], image_grid[i+1]});
+        }
+
+        // free all img_res_v - not needed anymore
+        delete[] img_res_v.data;
+        img_res_v.size = 0;
+        img_res_v.data = nullptr;
+
+        const int32_t image_size = clip_image_size(ctx_clip);
+
+        struct clip_image_grid_shape grid_shape = get_anyres_image_grid_shape({img->nx,img->ny}, grid_pinpoints, image_size);
+
+        int n_img_pos_out;
+        clip_llava_handle_patches(ctx_clip, image_embd_v, grid_shape, image_embd, &n_img_pos_out);
+        *n_img_pos = n_img_pos_out;
+
+        for (size_t i = 0; i < image_embd_v.size(); i++) {
+            free(image_embd_v[i]);
+        }
+        image_embd_v.clear();
+
+        // debug image/segment/normalization content:
+        // clip_image_u8 * tmp = clip_image_u8_init();
+        // clip_image_convert_f32_to_u8(*image_feature, *tmp);
+        // clip_image_save_to_bmp(*tmp, "image_feature.bmp");
+    }
+    */
+
+    // free the preprocessed image batch; the embedding has been written to image_embd
+    delete[] img_res_v.data;
+
+    LOG("%s: image embedding created: %d tokens\n", __func__, *n_img_pos);
+
+    const int64_t t_img_enc_end_us = ggml_time_us();
+    float t_img_enc_ms = (t_img_enc_end_us - t_img_enc_start_us) / 1000.0;
+
+    LOG("\n%s: image encoded in %8.2f ms by CLIP (%8.2f ms per image patch)\n", __func__, t_img_enc_ms, t_img_enc_ms / *n_img_pos);
+
+    return true;
+}
+
+bool omnivlm_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip) {
+    // make sure that the correct mmproj was used, i.e., compare apples to apples
+    int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama));
+    auto n_image_embd = clip_n_mmproj_embd(ctx_clip);
+    if (n_image_embd != n_llama_embd) {
+        LOG_TEE("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd);
+        return false;
+    }
+    return true;
+}
+
+bool omnivlm_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, const clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out) {
+    int num_max_patches = 6;
+    if (clip_is_minicpmv(ctx_clip)) {
+        num_max_patches = 10;
+    }
+
+    float * image_embd = (float *)malloc(clip_embd_nbytes(ctx_clip)*num_max_patches); // TODO: base on gridsize/llava model
+    if (!image_embd) {
+        LOG_TEE("Unable to allocate memory for image embeddings\n");
+        return false;
+    }
+
+    int n_img_pos;
+    if (!encode_image_with_clip(ctx_clip, n_threads, img, image_embd, &n_img_pos)) {
+        LOG_TEE("%s: cannot encode image, aborting\n", __func__);
+        free(image_embd);
+        return false;
+    }
+    *image_embd_out = image_embd;
+    *n_img_pos_out = n_img_pos;
+
+    return true;
+}
+
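+// Thin owner of the per-token bookkeeping (positions, sequence ids, logits
+// flags) required to submit a raw embedding buffer to llama_decode as a batch.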
+struct omnivlm_embd_batch {
+    std::vector<llama_pos>      pos;
+    std::vector<int32_t>        n_seq_id;
+    std::vector<llama_seq_id>   seq_id_0;
+    std::vector<llama_seq_id *> seq_ids;
+    std::vector<int8_t>         logits;
+    llama_batch batch;
+    omnivlm_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
+        pos     .resize(n_tokens);
+        n_seq_id.resize(n_tokens);
+        seq_ids .resize(n_tokens + 1);
+        logits  .resize(n_tokens);
+        seq_id_0.resize(1);
+        seq_id_0[0] = seq_id;
+        seq_ids [n_tokens] = nullptr;
+        batch = {
+            /*n_tokens =*/ n_tokens,
+            /*tokens   =*/ nullptr,
+            /*embd     =*/ embd,
+            /*pos      =*/ pos.data(),
+            /*n_seq_id =*/ n_seq_id.data(),
+            /*seq_id   =*/ seq_ids.data(),
+            /*logits   =*/ logits.data(),
+        };
+        for (int i = 0; i < n_tokens; i++) {
+            batch.pos     [i] = pos_0 + i;
+            batch.n_seq_id[i] = 1;
+            batch.seq_id  [i] = seq_id_0.data();
+            batch.logits  [i] = false;
+        }
+    }
+};
+
+bool omnivlm_eval_image_embed(llama_context * ctx_llama, const struct omni_image_embed * image_embed, int n_batch, int * n_past) {
+    int n_embd = llama_n_embd(llama_get_model(ctx_llama));
+
+    for (int i = 0; i < image_embed->n_image_pos; i += n_batch) {
+        int n_eval = image_embed->n_image_pos - i;
+        if (n_eval > n_batch) {
+            n_eval = n_batch;
+        }
+        float * embd = image_embed->embed+i*n_embd;
+        llama_batch batch = {int32_t(n_eval), nullptr, embd, nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, };
+        if (llama_decode(ctx_llama, batch)) {
+            LOG_TEE("%s : failed to eval\n", __func__);
+            return false;
+        }
+        *n_past += n_eval;
+    }
+    return true;
+}
+
+struct omni_image_embed * omnivlm_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length) {
+    clip_image_u8 * img = clip_image_u8_init();
+    if (!clip_image_load_from_bytes(image_bytes, image_bytes_length, img)) {
+        clip_image_u8_free(img);
+        LOG_TEE("%s: can't load image from bytes, is it a valid image?", __func__);
+        return NULL;
+    }
+
+    float* image_embed = NULL;
+    int n_image_pos = 0;
+    bool image_embed_result = omnivlm_image_embed_make_with_clip_img(ctx_clip, n_threads, img, &image_embed, &n_image_pos);
+    if (!image_embed_result) {
+        clip_image_u8_free(img);
+        LOG_TEE("%s: couldn't embed the image\n", __func__);
+        return NULL;
+    }
+
+    clip_image_u8_free(img);
+    auto result = (omni_image_embed*)malloc(sizeof(omni_image_embed));
+    result->embed = image_embed;
+    result->n_image_pos = n_image_pos;
+    return result;
+}
+
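+// Read an entire file into a malloc'd buffer; on success the caller owns
+// *bytesOut and must free() it.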
+static bool load_file_to_bytes(const char* path, unsigned char** bytesOut, long *sizeOut) {
+    auto* file = fopen(path, "rb");
+    if (file == NULL) {
+        LOG_TEE("%s: can't read file %s\n", __func__, path);
+        return false;
+    }
+
+    fseek(file, 0, SEEK_END);
+    auto fileSize = ftell(file);
+    fseek(file, 0, SEEK_SET);
+
+    auto buffer = (unsigned char *)malloc(fileSize); // Allocate memory to hold the file data
+    if (buffer == NULL) {
+        LOG_TEE("%s: failed to alloc %ld bytes for file %s\n", __func__, fileSize, path);
+        perror("Memory allocation error");
+        fclose(file);
+        return false;
+    }
+    errno = 0;
+    size_t ret = fread(buffer, 1, fileSize, file); // Read the file into the buffer
+    if (ferror(file)) {
+        die_fmt("read error: %s", strerror(errno));
+    }
+    if (ret != (size_t) fileSize) {
+        die("unexpectedly reached end of file");
+    }
+    fclose(file); // Close the file
+
+    *bytesOut = buffer;
+    *sizeOut = fileSize;
+    return true;
+}
+
+struct omni_image_embed * omnivlm_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path) {
+    unsigned char* image_bytes;
+    long image_bytes_length;
+    auto loaded = load_file_to_bytes(image_path, &image_bytes, &image_bytes_length);
+    if (!loaded) {
+        LOG_TEE("%s: failed to load %s\n", __func__, image_path);
+        return NULL;
+    }
+
+    omni_image_embed *embed = omnivlm_image_embed_make_with_bytes(ctx_clip, n_threads, image_bytes, image_bytes_length);
+    free(image_bytes);
+
+    return embed;
+}
+
+void omnivlm_image_embed_free(struct omni_image_embed * embed) {
+    free(embed->embed);
+    free(embed);
+}
diff --git a/examples/omni-vlm/omni-vlm.h b/examples/omni-vlm/omni-vlm.h
new file mode 100644
index 000000000..90b20c651
--- /dev/null
+++ b/examples/omni-vlm/omni-vlm.h
@@ -0,0 +1,46 @@
+#ifndef OMNIVLM_H
+#define OMNIVLM_H
+
+#ifdef LLAMA_SHARED
+#    if defined(_WIN32) && !defined(__MINGW32__)
+#        ifdef LLAMA_BUILD
+#            define OMNIVLM_API __declspec(dllexport)
+#        else
+#            define OMNIVLM_API __declspec(dllimport)
+#        endif
+#    else
+#        define OMNIVLM_API __attribute__ ((visibility ("default")))
+#    endif
+#else
+#    define OMNIVLM_API
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct clip_ctx;
+struct omni_image_embed {
+    float * embed;
+    int n_image_pos;
+};
+
+OMNIVLM_API bool omnivlm_validate_embed_size(const struct llama_context * ctx_llama, const struct clip_ctx * ctx_clip);
+
+OMNIVLM_API bool omnivlm_image_embed_make_with_clip_img(struct clip_ctx * ctx_clip, int n_threads, const struct clip_image_u8 * img, float ** image_embd_out, int * n_img_pos_out);
+
+/** build an image embed from image file bytes */
+OMNIVLM_API struct omni_image_embed * omnivlm_image_embed_make_with_bytes(struct clip_ctx * ctx_clip, int n_threads, const unsigned char * image_bytes, int image_bytes_length);
+/** build an image embed from a path to an image filename */
+OMNIVLM_API struct omni_image_embed * omnivlm_image_embed_make_with_filename(struct clip_ctx * ctx_clip, int n_threads, const char * image_path);
+/** free an embedding made with omnivlm_image_embed_make_* */
+OMNIVLM_API void omnivlm_image_embed_free(struct omni_image_embed * embed);
+
+/** write the image represented by embed into the llama context with batch size n_batch, starting at context pos n_past. on completion, n_past points to the next position in the context after the image embed. */
+OMNIVLM_API bool omnivlm_eval_image_embed(struct llama_context * ctx_llama, const struct omni_image_embed * embed, int n_batch, int * n_past);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/examples/omni-vlm/omni_vlm_cpp.py b/examples/omni-vlm/omni_vlm_cpp.py
new file mode 100644
index 000000000..6f23f7c4c
--- /dev/null
+++ b/examples/omni-vlm/omni_vlm_cpp.py
@@ -0,0 +1,84 @@
+import ctypes
+import os
+import sys
+from pathlib import Path
+from typing import Optional
+
+
+# Load the library
+def _load_shared_library(lib_base_name: str, base_path: Optional[Path] = None):
+    # Determine the file extension based on the platform
+    if sys.platform.startswith("linux"):
+        lib_ext = ".so"
+    elif sys.platform == "darwin":
+        lib_ext = ".dylib"
+    elif sys.platform == "win32":
+        lib_ext = ".dll"
+    else:
+        raise RuntimeError("Unsupported platform")
+
+    # Construct the paths to the possible shared library names
+    if base_path is None:
+        _base_path = Path(__file__).parent.parent.resolve()
+    else:
+        print(f"Using base path: {base_path}")
+        _base_path = base_path
+    _lib_paths = [
+        _base_path / f"lib{lib_base_name}{lib_ext}",
+        _base_path / f"{lib_base_name}{lib_ext}",
+    ]
+
+    # Add the library directory to the DLL search path on Windows (if needed)
+    if sys.platform == "win32" and sys.version_info >= (3, 8):
+        os.add_dll_directory(str(_base_path))
+
+    # Try to load the shared library, handling potential errors
+    for _lib_path in _lib_paths:
+        print(f"Trying to load shared library '{_lib_path}'")
+        if _lib_path.exists():
+            try:
+                return ctypes.CDLL(str(_lib_path))
+            except Exception as e:
+                print(f"Failed to load shared library '{_lib_path}': {e}")
+
+    raise FileNotFoundError(
+        f"Shared library with base name '{lib_base_name}' not found"
+    )
+
+
+# Specify the base name of the shared library to load
+_lib_base_name = "omni_vlm_wrapper_shared"
+base_path = (
+    Path(__file__).parent.parent.parent.resolve()
+    / "build"
+    / "examples"
+    / "omni-vlm"
+)
+
+# Load the library
+_lib = _load_shared_library(_lib_base_name, base_path)
+
+omni_char_p = ctypes.c_char_p
+
+
+def omnivlm_init(llm_model_path: omni_char_p, mmproj_model_path: omni_char_p):
+    return _lib.omnivlm_init(llm_model_path, mmproj_model_path)
+
+
+_lib.omnivlm_init.argtypes = [omni_char_p, omni_char_p]
+_lib.omnivlm_init.restype = None
+
+
+def omnivlm_inference(prompt: omni_char_p, image_path: omni_char_p):
+    return _lib.omnivlm_inference(prompt, image_path)
+
+
+_lib.omnivlm_inference.argtypes = [omni_char_p, omni_char_p]
+_lib.omnivlm_inference.restype = None
+
+
+def omnivlm_free():
+    return _lib.omnivlm_free()
+
+
+_lib.omnivlm_free.argtypes = []
+_lib.omnivlm_free.restype = None
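+
+# Example usage (hypothetical paths), mirroring omni_vlm_demo.py:
+#   omnivlm_init(b"/path/to/llm.gguf", b"/path/to/mmproj.gguf")
+#   omnivlm_inference(b"Describe this image for me", b"/path/to/cat.png")
+#   omnivlm_free()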
diff --git a/examples/omni-vlm/omni_vlm_demo.py b/examples/omni-vlm/omni_vlm_demo.py
new file mode 100644
index 000000000..4f8c5998f
--- /dev/null
+++ b/examples/omni-vlm/omni_vlm_demo.py
@@ -0,0 +1,55 @@
+
+import ctypes
+import logging
+import os
+
+import omni_vlm_cpp
+
+
+class NexaOmniVlmInference:
+    """
+    A class used for vision language model inference.
+    """
+
+    def __init__(self, llm_model_path: str, mmproj_model_path: str):
+        self.llm_model = ctypes.c_char_p(llm_model_path.encode("utf-8"))
+        self.mmproj_model = ctypes.c_char_p(mmproj_model_path.encode("utf-8"))
+
+        omni_vlm_cpp.omnivlm_init(self.llm_model, self.mmproj_model)
+
+    def inference(self, prompt: str, image_path: str):
+        prompt = ctypes.c_char_p(prompt.encode("utf-8"))
+        image_path = ctypes.c_char_p(image_path.encode("utf-8"))
+        omni_vlm_cpp.omnivlm_inference(prompt, image_path)
+
+    def __del__(self):
+        omni_vlm_cpp.omnivlm_free()
+
+
+if __name__ == "__main__":
+    import argparse
+
+    parser = argparse.ArgumentParser(
+        description="Run omni vision language model generation"
+    )
+    parser.add_argument("--model", type=str, help="Path to the llm model file")
+    parser.add_argument("--mmproj", type=str, help="Path to the mmproj file")
+    # parser.add_argument("--prompt", type=str, help="prompt string.")
+    # parser.add_argument("--image-path", type=str, help="Path to the image.")
+
+    args = parser.parse_args()
+
+    omni_vlm_obj = NexaOmniVlmInference(args.model, args.mmproj)
+    # omni_vlm_obj.inference(args.prompt, args.image_path)
+    while True:
+        print("Input your prompt:")
+        prompt = input()
+        if prompt == "":
+            print("ERROR: you entered an empty prompt, try again.")
+            continue
+        print("Input your image path:")
+        image_path = input()
+        while not os.path.exists(image_path):
+            print("ERROR: cannot find an image at that path, please check and input again.")
+            image_path = input()
+        omni_vlm_obj.inference(prompt, image_path)
diff --git a/examples/omni-vlm/omni_vlm_surgery.py b/examples/omni-vlm/omni_vlm_surgery.py
new file mode 100644
index 000000000..a0cac5616
--- /dev/null
+++ b/examples/omni-vlm/omni_vlm_surgery.py
@@ -0,0 +1,161 @@
+import argparse
+import glob
+import os
+import torch
+from safetensors import safe_open
+from safetensors.torch import save_file
+from typing import Any, ContextManager, cast
+
+# Function to determine if file is a SafeTensor file
+def is_safetensor_file(file_path):
+    return file_path.endswith('.safetensors')
+
+
+# Unified loading function
+def load_model(file_path):
+    if is_safetensor_file(file_path):
+        tensors = {}
+        with cast(ContextManager[Any], safe_open(file_path, framework="pt", device="cpu")) as f:
+            for key in f.keys():
+                tensors[key] = f.get_tensor(key).clone()
+                # output shape
+                print(f"{key} : {tensors[key].shape}")
+        return tensors, 'safetensor'
+    else:
+        return torch.load(file_path, map_location=torch.device('cpu')), 'pytorch'
+
+
+# Unified saving function
+def save_model(model, file_path, file_type):
+    if file_type == 'safetensor':
+        # safe_save(model, file_path)
+        save_file(model, file_path)
+    else:
+        torch.save(model, file_path)
+
+
+# Adapted function to clean vision tower from checkpoint
+def clean_vision_tower_from_checkpoint(checkpoint_path):
+    checkpoint, file_type = load_model(checkpoint_path)
+    # file_type = 'pytorch'
+    model_path = os.path.dirname(checkpoint_path)
+    print(f"Searching for vision tower tensors in {checkpoint_path}")
+    clip_tensors = [k for k, v in checkpoint.items() if k.startswith("vision_tower.vision_model")]
+
+    if len(clip_tensors) > 0:
+        print(f"Found {len(clip_tensors)} tensors to extract from {checkpoint_path}")
+        # Adapted for file type
+        clip_path = os.path.join(model_path, "omni_vlm.clip")
+
+        if os.path.exists(clip_path):
+            print(f"Loading existing omni_vlm.clip from {clip_path}")
+            existing_clip, _ = load_model(clip_path)
+        else:
+            print(f"Creating new omni_vlm.clip at {clip_path}")
+            existing_clip = {}
+        # Update existing_clip with new tensors, avoid duplicates
+        for name in clip_tensors:
+            simple_name = name[name.index('vision_model.'):] if 'vision_model.' in name else name
+            print(f"Adding {simple_name} to omni_vlm.clip")
+            if simple_name not in existing_clip:
+                existing_clip[simple_name] = checkpoint[name]
+
+        # Save the updated clip tensors back to omni_vlm.clip
+        save_model(existing_clip, clip_path, 'pytorch')
+
+        # Remove the tensors from the original checkpoint
+        for name in clip_tensors:
+            del checkpoint[name]
+
+        return True
+    return False
+
+def find_relevant_checkpoints(checkpoint_paths, newline_criteria, projector):
+    newline_checkpoint_path = None
+    projector_checkpoint_path = None
+
+    for path in checkpoint_paths:
+        checkpoint, _ = load_model(path)
+        if newline_criteria(checkpoint) and newline_checkpoint_path is None:
+            newline_checkpoint_path = path
+        if projector(checkpoint):
+            projector_checkpoint_path = path
+
+    return newline_checkpoint_path, projector_checkpoint_path
+
+def newline_criteria(checkpoint):
+    return any(k.startswith("model.image_newline") for k in checkpoint.keys())
+
+def proj_criteria(checkpoint):
+    # return any(k.startswith("multi_modal_projector.") or k.startswith("vision_proj.") for k in checkpoint.keys())
+    return any(k.startswith("multi_modal_projector.") for k in checkpoint.keys())
+
+
+# Command-line interface setup
+ap = argparse.ArgumentParser()
+ap.add_argument("-m", "--model", required=True, help="Path to omni-vlm model")
+ap.add_argument("-C", "--clean-vision-tower", action="store_true", help="Remove any vision tower from the model files")
+args = ap.parse_args()
+
+if args.clean_vision_tower:
+    # Generalized to handle both PyTorch and SafeTensors models
+    model_files = sorted(glob.glob(f"{args.model}/*"), key=os.path.getmtime, reverse=True)
+    # checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and path.startswith('pytorch')) or (path.endswith('.safetensors') and path.startswith('model'))]
+    checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and 'pytorch' in path.split('/')[-1].split('\\')[-1]) or (path.endswith('.safetensors') and 'model' in path.split('/')[-1].split('\\')[-1])]
+    for projector_checkpoint_path in checkpoint_paths:
+        print(f"Cleaning {projector_checkpoint_path}")
+        if not clean_vision_tower_from_checkpoint(projector_checkpoint_path):
+            print(f"No vision tower found in {projector_checkpoint_path}")
+            # we break once none is found, so far all models append them at the end
+            # break
+    print("Done! All vision tower tensors are removed from the model files and stored in the omni_vlm.clip file.")
+
+# Now we look for the projector in the last checkpoint
+model_files = sorted(glob.glob(f"{args.model}/*"), key=os.path.getmtime, reverse=True)
+checkpoint_paths = [path for path in model_files if (path.endswith('.bin') and 'pytorch' in path.split('/')[-1].split('\\')[-1]) or (path.endswith('.safetensors') and 'model' in path.split('/')[-1].split('\\')[-1])]
+# last_checkpoint_path = checkpoint_paths[0]
+# first_checkpoint_path = checkpoint_paths[-1]
+newline_checkpoint_path, projector_checkpoint_path = find_relevant_checkpoints(checkpoint_paths, newline_criteria, proj_criteria)
+
+print(f"Taking projector from {projector_checkpoint_path}")
+first_mm_tensors = []
+first_checkpoint = None
+if newline_checkpoint_path is not None:
+    print(f"Taking newline from {newline_checkpoint_path}")
+    first_checkpoint, file_type = load_model(newline_checkpoint_path)
+    first_mm_tensors = [k for k, v in first_checkpoint.items() if k.startswith("model.image_newline")]
+
+# Load the checkpoint
+mm_tensors = []
+last_checkpoint = None
+if projector_checkpoint_path is not None:
+    last_checkpoint, file_type = load_model(projector_checkpoint_path)
+    # mm_tensors = [k for k, v in last_checkpoint.items() if k.startswith("multi_modal_projector.") or k.startswith("vision_proj.")]
+    mm_tensors = [k for k, v in last_checkpoint.items() if k.startswith("multi_modal_projector.")]
+
+if len(mm_tensors) == 0:
+    if last_checkpoint is not None:
+        for k, v in last_checkpoint.items():
+            print(k)
+    print(f"Found {len(mm_tensors)} tensors to extract out of {len(last_checkpoint) if last_checkpoint is not None else 0} tensors.")
+    print("No tensors found. Is this an omni-vlm model?")
+    exit()
+
+print(f"Found {len(mm_tensors)} tensors to extract.")
+print(f"Found additional {len(first_mm_tensors)} tensors to extract.")
+# projector = {name: checkpoint[name].float() for name in mm_tensors}
+projector = {}
+for name in mm_tensors:
+    assert last_checkpoint is not None
+    projector[name] = last_checkpoint[name].float()
+for name in first_mm_tensors:
+    assert first_checkpoint is not None
+    projector[name] = first_checkpoint[name].float()
+
+if len(projector) > 0:
+    save_model(projector, f"{args.model}/omni_vlm.projector", 'pytorch')
+
+print("Done!")
+print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
+print(f"Also, use {args.model}/omni_vlm.projector to prepare an omni-vlm-encoder.gguf file.")
diff --git a/examples/omni-vlm/requirements.txt b/examples/omni-vlm/requirements.txt
new file mode 100644
index 000000000..40266eddf
--- /dev/null
+++ b/examples/omni-vlm/requirements.txt
@@ -0,0 +1,5 @@
+-r ../../requirements/requirements-convert_legacy_llama.txt
+# --extra-index-url https://download.pytorch.org/whl/cpu
+pillow
+torch
+torchvision