diff --git a/.devops/main-intel.Dockerfile b/.devops/main-intel.Dockerfile new file mode 100644 index 000000000..572e5d8ea --- /dev/null +++ b/.devops/main-intel.Dockerfile @@ -0,0 +1,28 @@ +ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04 + +FROM intel/oneapi-basekit:$ONEAPI_VERSION as build + +ARG LLAMA_SYCL_F16=OFF +RUN apt-get update && \ + apt-get install -y git + +WORKDIR /app + +COPY . . + +RUN mkdir build && \ + cd build && \ + if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \ + echo "LLAMA_SYCL_F16 is set" && \ + export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \ + fi && \ + cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \ + cmake --build . --config Release --target main + +FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime + +COPY --from=build /app/build/bin/main /main + +ENV LC_ALL=C.utf8 + +ENTRYPOINT [ "/main" ] diff --git a/.devops/main-vulkan.Dockerfile b/.devops/main-vulkan.Dockerfile new file mode 100644 index 000000000..bca460365 --- /dev/null +++ b/.devops/main-vulkan.Dockerfile @@ -0,0 +1,29 @@ +ARG UBUNTU_VERSION=jammy + +FROM ubuntu:$UBUNTU_VERSION as build + +# Install build tools +RUN apt update && apt install -y git build-essential cmake wget + +# Install Vulkan SDK +RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \ + wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \ + apt update -y && \ + apt-get install -y vulkan-sdk + +# Build it +WORKDIR /app +COPY . . +RUN mkdir build && \ + cd build && \ + cmake .. -DLLAMA_VULKAN=1 && \ + cmake --build . --config Release --target main + +# Clean up +WORKDIR / +RUN cp /app/build/bin/main /main && \ + rm -rf /app + +ENV LC_ALL=C.utf8 + +ENTRYPOINT [ "/main" ] diff --git a/.devops/nix/nixpkgs-instances.nix b/.devops/nix/nixpkgs-instances.nix index 6e9872b28..4a2f81c4b 100644 --- a/.devops/nix/nixpkgs-instances.nix +++ b/.devops/nix/nixpkgs-instances.nix @@ -7,6 +7,18 @@ { system, ... }: { _module.args = { + # Note: bringing up https://zimbatm.com/notes/1000-instances-of-nixpkgs + # again, the below creates several nixpkgs instances which the + # flake-centric CLI will be forced to evaluate e.g. on `nix flake show`. + # + # This is currently "slow" and "expensive", on a certain scale. + # This also isn't "right" in that this hinders dependency injection at + # the level of flake inputs. This might get removed in the foreseeable + # future. + # + # Note that you can use these expressions without Nix + # (`pkgs.callPackage ./devops/nix/scope.nix { }` is the entry point). + pkgsCuda = import inputs.nixpkgs { inherit system; # Ensure dependencies use CUDA consistently (e.g. that openmpi, ucc, diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix index 43bdbd755..ad23f7dd7 100644 --- a/.devops/nix/package.nix +++ b/.devops/nix/package.nix @@ -13,18 +13,22 @@ cudaPackages, darwin, rocmPackages, + vulkan-headers, + vulkan-loader, clblast, useBlas ? builtins.all (x: !x) [ useCuda useMetalKit useOpenCL useRocm + useVulkan ], useCuda ? config.cudaSupport, useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin && !useOpenCL, useMpi ? false, # Increases the runtime closure size by ~700M useOpenCL ? false, useRocm ? config.rocmSupport, + useVulkan ? false, llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake }@inputs: @@ -48,7 +52,8 @@ let ++ lib.optionals useMetalKit [ "MetalKit" ] ++ lib.optionals useMpi [ "MPI" ] ++ lib.optionals useOpenCL [ "OpenCL" ] - ++ lib.optionals useRocm [ "ROCm" ]; + ++ lib.optionals useRocm [ "ROCm" ] + ++ lib.optionals useVulkan [ "Vulkan" ]; pnameSuffix = strings.optionalString (suffices != [ ]) @@ -73,6 +78,7 @@ let ps: [ ps.numpy ps.sentencepiece + ps.tiktoken ps.torchWithoutCuda ps.transformers ] @@ -107,6 +113,11 @@ let hipblas rocblas ]; + + vulkanBuildInputs = [ + vulkan-headers + vulkan-loader + ]; in effectiveStdenv.mkDerivation ( @@ -114,14 +125,22 @@ effectiveStdenv.mkDerivation ( pname = "llama-cpp${pnameSuffix}"; version = llamaVersion; + # Note: none of the files discarded here are visible in the sandbox or + # affect the output hash. This also means they can be modified without + # triggering a rebuild. src = lib.cleanSourceWith { filter = name: type: - !(builtins.any (_: _) [ + let + noneOf = builtins.all (x: !x); + baseName = baseNameOf name; + in + noneOf [ (lib.hasSuffix ".nix" name) # Ignore *.nix files when computing outPaths - (name == "README.md") # Ignore *.md changes whe computing outPaths - (lib.hasPrefix "." name) # Skip hidden files and directories - ]); + (lib.hasSuffix ".md" name) # Ignore *.md changes whe computing outPaths + (lib.hasPrefix "." baseName) # Skip hidden files and directories + (baseName == "flake.lock") + ]; src = lib.cleanSource ../../.; }; @@ -155,11 +174,12 @@ effectiveStdenv.mkDerivation ( ++ optionals useCuda cudaBuildInputs ++ optionals useMpi [ mpi ] ++ optionals useOpenCL [ clblast ] - ++ optionals useRocm rocmBuildInputs; + ++ optionals useRocm rocmBuildInputs + ++ optionals useVulkan vulkanBuildInputs; cmakeFlags = [ - (cmakeBool "LLAMA_NATIVE" true) + (cmakeBool "LLAMA_NATIVE" false) (cmakeBool "LLAMA_BUILD_SERVER" true) (cmakeBool "BUILD_SHARED_LIBS" true) (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true) @@ -169,6 +189,7 @@ effectiveStdenv.mkDerivation ( (cmakeBool "LLAMA_HIPBLAS" useRocm) (cmakeBool "LLAMA_METAL" useMetalKit) (cmakeBool "LLAMA_MPI" useMpi) + (cmakeBool "LLAMA_VULKAN" useVulkan) ] ++ optionals useCuda [ ( @@ -209,6 +230,7 @@ effectiveStdenv.mkDerivation ( useMpi useOpenCL useRocm + useVulkan ; shell = mkShell { @@ -216,6 +238,9 @@ effectiveStdenv.mkDerivation ( description = "contains numpy and sentencepiece"; buildInputs = [ llama-python ]; inputsFrom = [ finalAttrs.finalPackage ]; + shellHook = '' + addToSearchPath "LD_LIBRARY_PATH" "${lib.getLib effectiveStdenv.cc.cc}/lib" + ''; }; shell-extra = mkShell { @@ -230,11 +255,11 @@ effectiveStdenv.mkDerivation ( # Configurations we don't want even the CI to evaluate. Results in the # "unsupported platform" messages. This is mostly a no-op, because # cudaPackages would've refused to evaluate anyway. - badPlatforms = optionals (useCuda || useOpenCL) lib.platforms.darwin; + badPlatforms = optionals (useCuda || useOpenCL || useVulkan) lib.platforms.darwin; # Configurations that are known to result in build failures. Can be # overridden by importing Nixpkgs with `allowBroken = true`. - broken = (useMetalKit && !effectiveStdenv.isDarwin); + broken = (useMetalKit && !effectiveStdenv.isDarwin) || (useVulkan && effectiveStdenv.isDarwin); description = "Inference of LLaMA model in pure C/C++${descriptionSuffix}"; homepage = "https://github.com/ggerganov/llama.cpp/"; diff --git a/.devops/nix/scope.nix b/.devops/nix/scope.nix index 7932ac1e8..d295995a4 100644 --- a/.devops/nix/scope.nix +++ b/.devops/nix/scope.nix @@ -4,6 +4,10 @@ llamaVersion ? "0.0.0", }: +# We're using `makeScope` instead of just writing out an attrset +# because it allows users to apply overlays later using `overrideScope'`. +# Cf. https://noogle.dev/f/lib/makeScope + lib.makeScope newScope ( self: { inherit llamaVersion; diff --git a/.devops/server-cuda.Dockerfile b/.devops/server-cuda.Dockerfile new file mode 100644 index 000000000..4f83904bc --- /dev/null +++ b/.devops/server-cuda.Dockerfile @@ -0,0 +1,32 @@ +ARG UBUNTU_VERSION=22.04 +# This needs to generally match the container host's environment. +ARG CUDA_VERSION=11.7.1 +# Target the CUDA build image +ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION} +# Target the CUDA runtime image +ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION} + +FROM ${BASE_CUDA_DEV_CONTAINER} as build + +# Unless otherwise specified, we make a fat build. +ARG CUDA_DOCKER_ARCH=all + +RUN apt-get update && \ + apt-get install -y build-essential git + +WORKDIR /app + +COPY . . + +# Set nvcc architecture +ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH} +# Enable cuBLAS +ENV LLAMA_CUBLAS=1 + +RUN make + +FROM ${BASE_CUDA_RUN_CONTAINER} as runtime + +COPY --from=build /app/server /server + +ENTRYPOINT [ "/server" ] diff --git a/.devops/server-intel.Dockerfile b/.devops/server-intel.Dockerfile new file mode 100644 index 000000000..312f2df80 --- /dev/null +++ b/.devops/server-intel.Dockerfile @@ -0,0 +1,28 @@ +ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04 + +FROM intel/oneapi-basekit:$ONEAPI_VERSION as build + +ARG LLAMA_SYCL_F16=OFF +RUN apt-get update && \ + apt-get install -y git + +WORKDIR /app + +COPY . . + +RUN mkdir build && \ + cd build && \ + if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \ + echo "LLAMA_SYCL_F16 is set" && \ + export OPT_SYCL_F16="-DLLAMA_SYCL_F16=ON"; \ + fi && \ + cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \ + cmake --build . --config Release --target server + +FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime + +COPY --from=build /app/build/bin/server /server + +ENV LC_ALL=C.utf8 + +ENTRYPOINT [ "/server" ] diff --git a/.devops/server-rocm.Dockerfile b/.devops/server-rocm.Dockerfile new file mode 100644 index 000000000..e9a31647c --- /dev/null +++ b/.devops/server-rocm.Dockerfile @@ -0,0 +1,45 @@ +ARG UBUNTU_VERSION=22.04 + +# This needs to generally match the container host's environment. +ARG ROCM_VERSION=5.6 + +# Target the CUDA build image +ARG BASE_ROCM_DEV_CONTAINER=rocm/dev-ubuntu-${UBUNTU_VERSION}:${ROCM_VERSION}-complete + +FROM ${BASE_ROCM_DEV_CONTAINER} as build + +# Unless otherwise specified, we make a fat build. +# List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878 +# This is mostly tied to rocBLAS supported archs. +ARG ROCM_DOCKER_ARCH=\ + gfx803 \ + gfx900 \ + gfx906 \ + gfx908 \ + gfx90a \ + gfx1010 \ + gfx1030 \ + gfx1100 \ + gfx1101 \ + gfx1102 + +COPY requirements.txt requirements.txt +COPY requirements requirements + +RUN pip install --upgrade pip setuptools wheel \ + && pip install -r requirements.txt + +WORKDIR /app + +COPY . . + +# Set nvcc architecture +ENV GPU_TARGETS=${ROCM_DOCKER_ARCH} +# Enable ROCm +ENV LLAMA_HIPBLAS=1 +ENV CC=/opt/rocm/llvm/bin/clang +ENV CXX=/opt/rocm/llvm/bin/clang++ + +RUN make + +ENTRYPOINT [ "/app/server" ] diff --git a/.devops/server-vulkan.Dockerfile b/.devops/server-vulkan.Dockerfile new file mode 100644 index 000000000..e0add6fc3 --- /dev/null +++ b/.devops/server-vulkan.Dockerfile @@ -0,0 +1,29 @@ +ARG UBUNTU_VERSION=jammy + +FROM ubuntu:$UBUNTU_VERSION as build + +# Install build tools +RUN apt update && apt install -y git build-essential cmake wget + +# Install Vulkan SDK +RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \ + wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \ + apt update -y && \ + apt-get install -y vulkan-sdk + +# Build it +WORKDIR /app +COPY . . +RUN mkdir build && \ + cd build && \ + cmake .. -DLLAMA_VULKAN=1 && \ + cmake --build . --config Release --target server + +# Clean up +WORKDIR / +RUN cp /app/build/bin/server /server && \ + rm -rf /app + +ENV LC_ALL=C.utf8 + +ENTRYPOINT [ "/server" ] diff --git a/.devops/server.Dockerfile b/.devops/server.Dockerfile new file mode 100644 index 000000000..134588fe2 --- /dev/null +++ b/.devops/server.Dockerfile @@ -0,0 +1,20 @@ +ARG UBUNTU_VERSION=22.04 + +FROM ubuntu:$UBUNTU_VERSION as build + +RUN apt-get update && \ + apt-get install -y build-essential git + +WORKDIR /app + +COPY . . + +RUN make + +FROM ubuntu:$UBUNTU_VERSION as runtime + +COPY --from=build /app/server /server + +ENV LC_ALL=C.utf8 + +ENTRYPOINT [ "/server" ] diff --git a/.ecrc b/.ecrc index b682057dd..a3351f4e6 100644 --- a/.ecrc +++ b/.ecrc @@ -1,4 +1,5 @@ { + "Exclude": ["^\\.gitmodules$"], "Disable": { "IndentSize": true } diff --git a/.flake8 b/.flake8 index 113ca5fd3..18fba2c15 100644 --- a/.flake8 +++ b/.flake8 @@ -1,2 +1,3 @@ [flake8] max-line-length = 125 +ignore = W503 diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0a28a1111..ed292d6b8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -72,7 +72,7 @@ jobs: id: cmake_test run: | cd build - ctest --verbose --timeout 900 + ctest -L main --verbose --timeout 900 ubuntu-latest-cmake-sanitizer: runs-on: ubuntu-latest @@ -107,7 +107,7 @@ jobs: id: cmake_test run: | cd build - ctest --verbose --timeout 900 + ctest -L main --verbose --timeout 900 ubuntu-latest-cmake-mpi: runs-on: ubuntu-latest @@ -141,7 +141,89 @@ jobs: id: cmake_test run: | cd build - ctest --verbose + ctest -L main --verbose + + ubuntu-22-cmake-sycl: + runs-on: ubuntu-22.04 + + continue-on-error: true + + steps: + - uses: actions/checkout@v2 + + - name: add oneAPI to apt + shell: bash + run: | + cd /tmp + wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + rm GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main" + + - name: install oneAPI dpcpp compiler + shell: bash + run: | + sudo apt update + sudo apt install intel-oneapi-compiler-dpcpp-cpp + + - name: install oneAPI MKL library + shell: bash + run: | + sudo apt install intel-oneapi-mkl-devel + + - name: Clone + id: checkout + uses: actions/checkout@v3 + + - name: Build + id: cmake_build + run: | + source /opt/intel/oneapi/setvars.sh + mkdir build + cd build + cmake -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx .. + cmake --build . --config Release -j $(nproc) + + ubuntu-22-cmake-sycl-fp16: + runs-on: ubuntu-22.04 + + continue-on-error: true + + steps: + - uses: actions/checkout@v2 + + - name: add oneAPI to apt + shell: bash + run: | + cd /tmp + wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + rm GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB + sudo add-apt-repository "deb https://apt.repos.intel.com/oneapi all main" + + - name: install oneAPI dpcpp compiler + shell: bash + run: | + sudo apt update + sudo apt install intel-oneapi-compiler-dpcpp-cpp + + - name: install oneAPI MKL library + shell: bash + run: | + sudo apt install intel-oneapi-mkl-devel + + - name: Clone + id: checkout + uses: actions/checkout@v3 + + - name: Build + id: cmake_build + run: | + source /opt/intel/oneapi/setvars.sh + mkdir build + cd build + cmake -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON .. + cmake --build . --config Release -j $(nproc) # TODO: build with LLAMA_NO_METAL because test-backend-ops fail on "Apple Paravirtual device" and I don't know # how to debug it. @@ -202,7 +284,7 @@ jobs: id: cmake_test run: | cd build - ctest --verbose --timeout 900 + ctest -L main --verbose --timeout 900 macOS-latest-cmake-ios: runs-on: macos-latest @@ -295,7 +377,8 @@ jobs: OPENBLAS_VERSION: 0.3.23 OPENCL_VERSION: 2023.04.17 CLBLAST_VERSION: 1.6.0 - SDE_VERSION: 9.21.1-2023-04-24 + SDE_VERSION: 9.33.0-2024-01-07 + VULKAN_VERSION: 1.3.261.1 strategy: matrix: @@ -312,6 +395,10 @@ jobs: defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CLBLAST=ON -DBUILD_SHARED_LIBS=ON -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/clblast"' - build: 'openblas' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' + - build: 'kompute' + defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON' + - build: 'vulkan' + defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_VULKAN=ON -DBUILD_SHARED_LIBS=ON' steps: - name: Clone @@ -320,6 +407,12 @@ jobs: with: fetch-depth: 0 + - name: Clone Kompute submodule + id: clone_kompute + if: ${{ matrix.build == 'kompute' }} + run: | + git submodule update --init kompute + - name: Download OpenCL SDK id: get_opencl if: ${{ matrix.build == 'clblast' }} @@ -354,6 +447,15 @@ jobs: $lib = $(join-path $msvc 'bin\Hostx64\x64\lib.exe') & $lib /machine:x64 "/def:${env:RUNNER_TEMP}/openblas/lib/libopenblas.def" "/out:${env:RUNNER_TEMP}/openblas/lib/openblas.lib" /name:openblas.dll + - name: Install Vulkan SDK + id: get_vulkan + if: ${{ matrix.build == 'kompute' || matrix.build == 'vulkan' }} + run: | + curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe" + & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install + Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}" + Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin" + - name: Build id: cmake_build run: | @@ -391,22 +493,23 @@ jobs: - name: Test id: cmake_test - if: ${{ matrix.build != 'clblast' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }} # not all machines have native AVX-512 + # not all machines have native AVX-512 + if: ${{ matrix.build != 'clblast' && matrix.build != 'kompute' && matrix.build != 'vulkan' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }} run: | cd build - ctest -C Release --verbose --timeout 900 + ctest -L main -C Release --verbose --timeout 900 - name: Test (Intel SDE) id: cmake_test_sde if: ${{ matrix.build == 'avx512' && env.HAS_AVX512F == '0' }} # use Intel SDE for AVX-512 emulation run: | - curl.exe -o $env:RUNNER_TEMP/sde.tar.xz -L "https://downloadmirror.intel.com/777395/sde-external-${env:SDE_VERSION}-win.tar.xz" + curl.exe -o $env:RUNNER_TEMP/sde.tar.xz -L "https://downloadmirror.intel.com/813591/sde-external-${env:SDE_VERSION}-win.tar.xz" # for some weird reason windows tar doesn't like sde tar.xz 7z x "-o${env:RUNNER_TEMP}" $env:RUNNER_TEMP/sde.tar.xz 7z x "-o${env:RUNNER_TEMP}" $env:RUNNER_TEMP/sde.tar $sde = $(join-path $env:RUNNER_TEMP sde-external-${env:SDE_VERSION}-win/sde.exe) cd build - & $sde -future -- ctest -C Release --verbose --timeout 900 + & $sde -future -- ctest -L main -C Release --verbose --timeout 900 - name: Determine tag name id: tag @@ -505,6 +608,31 @@ jobs: path: | cudart-llama-bin-win-cu${{ matrix.cuda }}-x64.zip + windows-latest-cmake-sycl: + runs-on: windows-latest + defaults: + run: + shell: bash + + env: + WINDOWS_BASEKIT_URL: https://registrationcenter-download.intel.com/akdlm/IRC_NAS/62641e01-1e8d-4ace-91d6-ae03f7f8a71f/w_BaseKit_p_2024.0.0.49563_offline.exe + WINDOWS_DPCPP_MKL: intel.oneapi.win.cpp-dpcpp-common:intel.oneapi.win.mkl.devel + + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Install + run: scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL + + - name: Build + id: cmake_build + run: examples/sycl/win-build-sycl.bat + ios-xcode-build: runs-on: macos-latest @@ -515,6 +643,31 @@ jobs: - name: Build Xcode project run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build + android-build: + runs-on: ubuntu-latest + + steps: + - name: Clone + uses: actions/checkout@v3 + + - name: Set up JDK + uses: actions/setup-java@v3 + with: + java-version: 17 + distribution: zulu + + - name: Setup Android SDK + uses: android-actions/setup-android@v3 + with: + log-accepted-android-sdk-licenses: false + + - name: Build + run: | + cd examples/llama.android + + # Skip armeabi-v7a for now (https://github.com/llvm/llvm-project/issues/65820). + ./gradlew build --no-daemon -Pskip-armeabi-v7a + # freeBSD-latest: # runs-on: macos-12 # steps: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 87904b75e..94f9161fc 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -28,13 +28,18 @@ jobs: config: - { tag: "light", dockerfile: ".devops/main.Dockerfile", platforms: "linux/amd64,linux/arm64" } - { tag: "full", dockerfile: ".devops/full.Dockerfile", platforms: "linux/amd64,linux/arm64" } + - { tag: "server", dockerfile: ".devops/server.Dockerfile", platforms: "linux/amd64,linux/arm64" } # NOTE(canardletter): The CUDA builds on arm64 are very slow, so I # have disabled them for now until the reason why # is understood. - { tag: "light-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platforms: "linux/amd64" } - { tag: "full-cuda", dockerfile: ".devops/full-cuda.Dockerfile", platforms: "linux/amd64" } + - { tag: "server-cuda", dockerfile: ".devops/server-cuda.Dockerfile", platforms: "linux/amd64" } - { tag: "light-rocm", dockerfile: ".devops/main-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } - { tag: "full-rocm", dockerfile: ".devops/full-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } + - { tag: "server-rocm", dockerfile: ".devops/server-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" } + - { tag: "light-intel", dockerfile: ".devops/main-intel.Dockerfile", platforms: "linux/amd64" } + - { tag: "server-intel", dockerfile: ".devops/server-intel.Dockerfile", platforms: "linux/amd64" } steps: - name: Check out the repo uses: actions/checkout@v3 diff --git a/.github/workflows/editorconfig.yml b/.github/workflows/editorconfig.yml index b4e535acf..0e0993cd4 100644 --- a/.github/workflows/editorconfig.yml +++ b/.github/workflows/editorconfig.yml @@ -1,6 +1,12 @@ name: EditorConfig Checker on: + workflow_dispatch: # allows manual triggering + inputs: + create_release: + description: 'Create new release' + required: true + type: boolean push: branches: - master diff --git a/.github/workflows/nix-ci-aarch64.yml b/.github/workflows/nix-ci-aarch64.yml new file mode 100644 index 000000000..0c6cf5f09 --- /dev/null +++ b/.github/workflows/nix-ci-aarch64.yml @@ -0,0 +1,62 @@ +name: Nix aarch64 builds + +on: + workflow_dispatch: # allows manual triggering + schedule: + # Rebuild daily rather than on every push because QEMU is expensive (e.g. + # 1.5h instead of minutes with the cold cache). + # + # randint(0, 59), randint(0, 23) + - cron: '26 12 * * *' + # But also rebuild if we touched any of the Nix expressions: + push: + branches: + - master + paths: ['**/*.nix', 'flake.lock'] + pull_request: + types: [opened, synchronize, reopened] + paths: ['**/*.nix', 'flake.lock'] + +jobs: + nix-build-aarch64: + if: ${{ vars.CACHIX_NAME != '' }} + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Install QEMU + # Copy-paste from https://github.com/orgs/community/discussions/8305#discussioncomment-5888654 + run: | + sudo apt-get update + sudo apt-get install -y qemu-user-static qemu-system-aarch64 + sudo usermod -a -G kvm $USER + - name: Install Nix + uses: DeterminateSystems/nix-installer-action@v9 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + extra-conf: | + extra-platforms = aarch64-linux + extra-system-features = nixos-test kvm + extra-substituters = https://${{ vars.CACHIX_NAME }}.cachix.org https://cuda-maintainers.cachix.org + extra-trusted-public-keys = ${{ vars.CACHIX_PUBLIC_KEY }} cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= + - uses: DeterminateSystems/magic-nix-cache-action@v2 + with: + upstream-cache: https://${{ matrix.cachixName }}.cachix.org + - name: Set-up cachix to push the results to + uses: cachix/cachix-action@v13 + with: + authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' + name: ${{ vars.CACHIX_NAME }} + - name: Show all output paths + run: > + nix run github:nix-community/nix-eval-jobs + -- --gc-roots-dir gcroot + --flake + ".#packages.aarch64-linux" + - name: Build + run: > + nix run github:Mic92/nix-fast-build + -- --skip-cached --no-nom + --systems aarch64-linux + --flake + ".#checks.aarch64-linux" diff --git a/.github/workflows/nix-ci.yml b/.github/workflows/nix-ci.yml index a38c6ead4..d19c7a576 100644 --- a/.github/workflows/nix-ci.yml +++ b/.github/workflows/nix-ci.yml @@ -5,10 +5,8 @@ on: push: branches: - master - paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', '**/*.sh', '**/*.py', '**/*.nix'] pull_request: types: [opened, synchronize, reopened] - paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m', '**/*.sh', '**/*.py', '**/*.nix'] jobs: nix-eval: @@ -69,44 +67,3 @@ jobs: -- --skip-cached --no-nom --flake ".#checks.$(nix eval --raw --impure --expr builtins.currentSystem)" - nix-build-aarch64: - if: ${{ vars.CACHIX_NAME != '' }} - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - name: Install QEMU - # Copy-paste from https://github.com/orgs/community/discussions/8305#discussioncomment-5888654 - run: | - sudo apt-get install -y qemu-user-static qemu-system-aarch64 - sudo usermod -a -G kvm $USER - - name: Install Nix - uses: DeterminateSystems/nix-installer-action@v9 - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - extra-conf: | - extra-platforms = aarch64-linux - extra-system-features = nixos-test kvm - extra-substituters = https://${{ vars.CACHIX_NAME }}.cachix.org https://cuda-maintainers.cachix.org - extra-trusted-public-keys = ${{ vars.CACHIX_PUBLIC_KEY }} cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E= - - uses: DeterminateSystems/magic-nix-cache-action@v2 - with: - upstream-cache: https://${{ matrix.cachixName }}.cachix.org - - name: Set-up cachix to push the results to - uses: cachix/cachix-action@v13 - with: - authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' - name: ${{ vars.CACHIX_NAME }} - - name: Show all output paths - run: > - nix run github:nix-community/nix-eval-jobs - -- --gc-roots-dir gcroot - --flake - ".#packages.aarch64-linux" - - name: Build - run: > - nix run github:Mic92/nix-fast-build - -- --skip-cached --no-nom - --systems aarch64-linux - --flake - ".#checks.aarch64-linux" diff --git a/.github/workflows/python-lint.yml b/.github/workflows/python-lint.yml index 56d17b66c..ea0a05ea1 100644 --- a/.github/workflows/python-lint.yml +++ b/.github/workflows/python-lint.yml @@ -16,5 +16,5 @@ jobs: - name: flake8 Lint uses: py-actions/flake8@v2 with: - ignore: "E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704" + ignore: "E203,E211,E221,E225,E231,E241,E251,E261,E266,E501,E701,E704,W503" exclude: "examples/*,examples/*/**,*/**/__init__.py" diff --git a/.gitignore b/.gitignore index cf1b692e9..b84459b92 100644 --- a/.gitignore +++ b/.gitignore @@ -27,7 +27,7 @@ lcov-report/ gcovr-report/ -build*/ +build* out/ tmp/ @@ -43,6 +43,7 @@ models-mnt /embedding /gguf /gguf-llama-simple +/imatrix /infill /libllama.so /llama-bench @@ -88,19 +89,4 @@ examples/jeopardy/results.txt poetry.lock poetry.toml - -# Test binaries -/tests/test-grammar-parser -/tests/test-llama-grammar -/tests/test-double-float -/tests/test-grad0 -/tests/test-opt -/tests/test-quantize-fns -/tests/test-quantize-perf -/tests/test-sampling -/tests/test-tokenizer-0-llama -/tests/test-tokenizer-0-falcon -/tests/test-tokenizer-1-llama -/tests/test-tokenizer-1-bpe -/tests/test-rope -/tests/test-backend-ops +nppBackup diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..b7e8b8ff2 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "kompute"] + path = kompute + url = https://github.com/nomic-ai/kompute.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 668669c6d..2a922fdb3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,6 @@ -cmake_minimum_required(VERSION 3.13) # for add_link_options +cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories. project("llama.cpp" C CXX) +include(CheckIncludeFileCXX) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) @@ -47,6 +48,7 @@ option(BUILD_SHARED_LIBS "build shared libraries" option(LLAMA_STATIC "llama: static link libraries" OFF) option(LLAMA_NATIVE "llama: enable -march=native flag" ON) option(LLAMA_LTO "llama: enable link time optimization" OFF) +option(LLAMA_CCACHE "llama: use ccache if available" ON) # debug option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON) @@ -76,6 +78,10 @@ if (NOT MSVC) option(LLAMA_F16C "llama: enable F16C" ${INS_ENB}) endif() +if (WIN32) + set(LLAMA_WIN_VER "0x602" CACHE STRING "llama: Windows Version") +endif() + # 3rd party libs option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON) option(LLAMA_BLAS "llama: use BLAS" OFF) @@ -93,16 +99,28 @@ set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF) option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF) option(LLAMA_CLBLAST "llama: use CLBlast" OFF) +option(LLAMA_VULKAN "llama: use Vulkan" OFF) +option(LLAMA_VULKAN_CHECK_RESULTS "llama: run Vulkan op checks" OFF) +option(LLAMA_VULKAN_DEBUG "llama: enable Vulkan debug output" OFF) +option(LLAMA_VULKAN_VALIDATE "llama: enable Vulkan validation" OFF) +option(LLAMA_VULKAN_RUN_TESTS "llama: run Vulkan tests" OFF) option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT}) option(LLAMA_METAL_NDEBUG "llama: disable Metal debugging" OFF) option(LLAMA_METAL_SHADER_DEBUG "llama: compile Metal with -fno-fast-math" OFF) +option(LLAMA_KOMPUTE "llama: use Kompute" OFF) option(LLAMA_MPI "llama: use MPI" OFF) option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF) +option(LLAMA_SYCL "llama: use SYCL" OFF) +option(LLAMA_SYCL_F16 "llama: use 16 bit floats for sycl calculations" OFF) +option(LLAMA_CPU_HBM "llama: use memkind for CPU HBM" OFF) option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE}) option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE}) option(LLAMA_BUILD_SERVER "llama: build server example" ON) +# add perf arguments +option(LLAMA_PERF "llama: enable perf" OFF) + # Required for relocatable CMake package include(${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake) @@ -110,11 +128,17 @@ include(${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake) # Compile flags # -set(CMAKE_CXX_STANDARD 11) +if (LLAMA_SYCL) + set(CMAKE_CXX_STANDARD 17) +else() + set(CMAKE_CXX_STANDARD 11) +endif() + set(CMAKE_CXX_STANDARD_REQUIRED true) set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD_REQUIRED true) set(THREADS_PREFER_PTHREAD_FLAG ON) + find_package(Threads REQUIRED) include(CheckCXXCompilerFlag) @@ -126,17 +150,17 @@ endif() if (NOT MSVC) if (LLAMA_SANITIZE_THREAD) add_compile_options(-fsanitize=thread) - link_libraries(-fsanitize=thread) + link_libraries (-fsanitize=thread) endif() if (LLAMA_SANITIZE_ADDRESS) add_compile_options(-fsanitize=address -fno-omit-frame-pointer) - link_libraries(-fsanitize=address) + link_libraries (-fsanitize=address) endif() if (LLAMA_SANITIZE_UNDEFINED) add_compile_options(-fsanitize=undefined) - link_libraries(-fsanitize=undefined) + link_libraries (-fsanitize=undefined) endif() endif() @@ -273,14 +297,17 @@ if (LLAMA_BLAS) endif() message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}") + add_compile_options(${BLAS_LINKER_FLAGS}) + add_compile_definitions(GGML_USE_OPENBLAS) + if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel")) add_compile_definitions(GGML_BLAS_USE_MKL) endif() - set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${BLAS_LIBRARIES}) - set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${BLAS_INCLUDE_DIRS}) + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${BLAS_LIBRARIES}) + set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${BLAS_INCLUDE_DIRS}) else() message(WARNING "BLAS not found, please refer to " "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" @@ -305,9 +332,6 @@ if (LLAMA_CUBLAS) set(GGML_SOURCES_CUDA ggml-cuda.cu) add_compile_definitions(GGML_USE_CUBLAS) -# if (LLAMA_CUDA_CUBLAS) -# add_compile_definitions(GGML_CUDA_CUBLAS) -# endif() if (LLAMA_CUDA_FORCE_DMMV) add_compile_definitions(GGML_CUDA_FORCE_DMMV) endif() @@ -362,15 +386,20 @@ if (LLAMA_MPI) find_package(MPI) if (MPI_C_FOUND) message(STATUS "MPI found") + set(GGML_HEADERS_MPI ggml-mpi.h) - set(GGML_SOURCES_MPI ggml-mpi.c ggml-mpi.h) + set(GGML_SOURCES_MPI ggml-mpi.c) + add_compile_definitions(GGML_USE_MPI) add_compile_definitions(${MPI_C_COMPILE_DEFINITIONS}) + if (NOT MSVC) add_compile_options(-Wno-cast-qual) endif() + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${MPI_C_LIBRARIES}) set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${MPI_C_INCLUDE_DIRS}) + # Even if you're only using the C header, C++ programs may bring in MPI # C++ functions, so more linkage is needed if (MPI_CXX_FOUND) @@ -397,49 +426,284 @@ if (LLAMA_CLBLAST) endif() endif() +if (LLAMA_VULKAN) + find_package(Vulkan) + if (Vulkan_FOUND) + message(STATUS "Vulkan found") + + set(GGML_HEADERS_VULKAN ggml-vulkan.h) + set(GGML_SOURCES_VULKAN ggml-vulkan.cpp) + + add_compile_definitions(GGML_USE_VULKAN) + + if (LLAMA_VULKAN_CHECK_RESULTS) + add_compile_definitions(GGML_VULKAN_CHECK_RESULTS) + endif() + + if (LLAMA_VULKAN_DEBUG) + add_compile_definitions(GGML_VULKAN_DEBUG) + endif() + + if (LLAMA_VULKAN_VALIDATE) + add_compile_definitions(GGML_VULKAN_VALIDATE) + endif() + + if (LLAMA_VULKAN_RUN_TESTS) + add_compile_definitions(GGML_VULKAN_RUN_TESTS) + endif() + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} Vulkan::Vulkan) + else() + message(WARNING "Vulkan not found") + endif() +endif() + if (LLAMA_HIPBLAS) list(APPEND CMAKE_PREFIX_PATH /opt/rocm) if (NOT ${CMAKE_C_COMPILER_ID} MATCHES "Clang") message(WARNING "Only LLVM is supported for HIP, hint: CC=/opt/rocm/llvm/bin/clang") endif() + if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++") endif() - find_package(hip) - find_package(hipblas) - find_package(rocblas) + find_package(hip REQUIRED) + find_package(hipblas REQUIRED) + find_package(rocblas REQUIRED) - if (${hipblas_FOUND} AND ${hip_FOUND}) - message(STATUS "HIP and hipBLAS found") - add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUBLAS) - if (LLAMA_HIP_UMA) - add_compile_definitions(GGML_HIP_UMA) - endif() - add_library(ggml-rocm OBJECT ggml-cuda.cu ggml-cuda.h) - if (BUILD_SHARED_LIBS) - set_target_properties(ggml-rocm PROPERTIES POSITION_INDEPENDENT_CODE ON) - endif() - if (LLAMA_CUDA_FORCE_DMMV) - target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_FORCE_DMMV) - endif() - if (LLAMA_CUDA_FORCE_MMQ) - target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_FORCE_MMQ) - endif() - target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X}) - target_compile_definitions(ggml-rocm PRIVATE GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y}) - target_compile_definitions(ggml-rocm PRIVATE K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER}) - set_source_files_properties(ggml-cuda.cu PROPERTIES LANGUAGE CXX) - target_link_libraries(ggml-rocm PRIVATE hip::device PUBLIC hip::host roc::rocblas roc::hipblas) + message(STATUS "HIP and hipBLAS found") - if (LLAMA_STATIC) - message(FATAL_ERROR "Static linking not supported for HIP/ROCm") - endif() - set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ggml-rocm) - else() - message(WARNING "hipBLAS or HIP not found. Try setting CMAKE_PREFIX_PATH=/opt/rocm") + set(GGML_HEADERS_ROCM ggml-cuda.h) + set(GGML_SOURCES_ROCM ggml-cuda.cu) + + add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUBLAS) + + if (LLAMA_HIP_UMA) + add_compile_definitions(GGML_HIP_UMA) endif() + + if (LLAMA_CUDA_FORCE_DMMV) + add_compile_definitions(GGML_CUDA_FORCE_DMMV) + endif() + + if (LLAMA_CUDA_FORCE_MMQ) + add_compile_definitions(GGML_CUDA_FORCE_MMQ) + endif() + + add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X}) + add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y}) + add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER}) + + set_source_files_properties(ggml-cuda.cu PROPERTIES LANGUAGE CXX) + + if (LLAMA_STATIC) + message(FATAL_ERROR "Static linking not supported for HIP/ROCm") + endif() + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device PUBLIC hip::host roc::rocblas roc::hipblas) +endif() + +if (LLAMA_SYCL) + if ( NOT DEFINED ENV{ONEAPI_ROOT}) + message(FATAL_ERROR "Not detect ENV {ONEAPI_ROOT}, please install oneAPI & source it, like: source /opt/intel/oneapi/setvars.sh") + endif() + #todo: AOT + + find_package(IntelSYCL REQUIRED) + + message(STATUS "SYCL found") + + add_compile_definitions(GML_USE_SYCL) + + if (LLAMA_SYCL_F16) + add_compile_definitions(GGML_SYCL_F16) + endif() + + add_compile_options(-I./) #include DPCT + add_compile_options(-I/${SYCL_INCLUDE_DIR}) + + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl -L${MKLROOT}/lib") + + set(GGML_HEADERS_SYCL ggml-sycl.h) + set(GGML_SOURCES_SYCL ggml-sycl.cpp) + + if (WIN32) + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl sycl7 OpenCL mkl_sycl_blas_dll.lib mkl_intel_ilp64_dll.lib mkl_sequential_dll.lib mkl_core_dll.lib) + else() + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread) + endif() +endif() + +if (LLAMA_KOMPUTE) + add_compile_definitions(VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1) + find_package(Vulkan COMPONENTS glslc REQUIRED) + find_program(glslc_executable NAMES glslc HINTS Vulkan::glslc) + if (NOT glslc_executable) + message(FATAL_ERROR "glslc not found") + endif() + + function(compile_shader) + set(options) + set(oneValueArgs) + set(multiValueArgs SOURCES) + cmake_parse_arguments(compile_shader "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + foreach(source ${compile_shader_SOURCES}) + get_filename_component(filename ${source} NAME) + set(spv_file ${filename}.spv) + add_custom_command( + OUTPUT ${spv_file} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/${source} + ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/common.comp + ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/op_getrows.comp + ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/op_mul_mv_q_n_pre.comp + ${CMAKE_CURRENT_SOURCE_DIR}/kompute-shaders/op_mul_mv_q_n.comp + COMMAND ${glslc_executable} --target-env=vulkan1.2 -o ${spv_file} ${CMAKE_CURRENT_SOURCE_DIR}/${source} + COMMENT "Compiling ${source} to ${spv_file}" + ) + + get_filename_component(RAW_FILE_NAME ${spv_file} NAME) + set(FILE_NAME "shader${RAW_FILE_NAME}") + string(REPLACE ".comp.spv" ".h" HEADER_FILE ${FILE_NAME}) + string(TOUPPER ${HEADER_FILE} HEADER_FILE_DEFINE) + string(REPLACE "." "_" HEADER_FILE_DEFINE "${HEADER_FILE_DEFINE}") + set(OUTPUT_HEADER_FILE "${HEADER_FILE}") + message(STATUS "${HEADER_FILE} generating ${HEADER_FILE_DEFINE}") + if(CMAKE_GENERATOR MATCHES "Visual Studio") + add_custom_command( + OUTPUT ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_BINARY_DIR}/bin/$/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + DEPENDS ${spv_file} xxd + COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/$/xxd" + ) + else() + add_custom_command( + OUTPUT ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "/*THIS FILE HAS BEEN AUTOMATICALLY GENERATED - DO NOT EDIT*/" > ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#ifndef ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "namespace kp {" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "namespace shader_data {" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_BINARY_DIR}/bin/xxd -i ${RAW_FILE_NAME} >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo "}}" >> ${OUTPUT_HEADER_FILE} + COMMAND ${CMAKE_COMMAND} -E echo \"\#endif // define ${HEADER_FILE_DEFINE}\" >> ${OUTPUT_HEADER_FILE} + DEPENDS ${spv_file} xxd + COMMENT "Converting to hpp: ${FILE_NAME} ${CMAKE_BINARY_DIR}/bin/xxd" + ) + endif() + endforeach() + endfunction() + + if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/kompute/CMakeLists.txt") + message(STATUS "Kompute found") + set(KOMPUTE_OPT_LOG_LEVEL Error CACHE STRING "Kompute log level") + add_subdirectory(kompute) + + # Compile our shaders + compile_shader(SOURCES + kompute-shaders/op_scale.comp + kompute-shaders/op_scale_8.comp + kompute-shaders/op_add.comp + kompute-shaders/op_addrow.comp + kompute-shaders/op_mul.comp + kompute-shaders/op_silu.comp + kompute-shaders/op_relu.comp + kompute-shaders/op_gelu.comp + kompute-shaders/op_softmax.comp + kompute-shaders/op_norm.comp + kompute-shaders/op_rmsnorm.comp + kompute-shaders/op_diagmask.comp + kompute-shaders/op_mul_mat_mat_f32.comp + kompute-shaders/op_mul_mat_f16.comp + kompute-shaders/op_mul_mat_q8_0.comp + kompute-shaders/op_mul_mat_q4_0.comp + kompute-shaders/op_mul_mat_q4_1.comp + kompute-shaders/op_mul_mat_q6_k.comp + kompute-shaders/op_getrows_f16.comp + kompute-shaders/op_getrows_q4_0.comp + kompute-shaders/op_getrows_q4_1.comp + kompute-shaders/op_getrows_q6_k.comp + kompute-shaders/op_rope_f16.comp + kompute-shaders/op_rope_f32.comp + kompute-shaders/op_cpy_f16_f16.comp + kompute-shaders/op_cpy_f16_f32.comp + kompute-shaders/op_cpy_f32_f16.comp + kompute-shaders/op_cpy_f32_f32.comp + ) + + # Create a custom target for our generated shaders + add_custom_target(generated_shaders DEPENDS + shaderop_scale.h + shaderop_scale_8.h + shaderop_add.h + shaderop_addrow.h + shaderop_mul.h + shaderop_silu.h + shaderop_relu.h + shaderop_gelu.h + shaderop_softmax.h + shaderop_norm.h + shaderop_rmsnorm.h + shaderop_diagmask.h + shaderop_mul_mat_mat_f32.h + shaderop_mul_mat_f16.h + shaderop_mul_mat_q8_0.h + shaderop_mul_mat_q4_0.h + shaderop_mul_mat_q4_1.h + shaderop_mul_mat_q6_k.h + shaderop_getrows_f16.h + shaderop_getrows_q4_0.h + shaderop_getrows_q4_1.h + shaderop_getrows_q6_k.h + shaderop_rope_f16.h + shaderop_rope_f32.h + shaderop_cpy_f16_f16.h + shaderop_cpy_f16_f32.h + shaderop_cpy_f32_f16.h + shaderop_cpy_f32_f32.h + ) + + # Create a custom command that depends on the generated_shaders + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp + COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp + DEPENDS generated_shaders + COMMENT "Ensuring shaders are generated before compiling ggml-kompute.cpp" + ) + + # Add the stamp to the main sources to ensure dependency tracking + set(GGML_SOURCES_KOMPUTE ggml-kompute.cpp ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp) + set(GGML_HEADERS_KOMPUTE ggml-kompute.h ${CMAKE_CURRENT_BINARY_DIR}/ggml-kompute.stamp) + + add_compile_definitions(GGML_USE_KOMPUTE) + + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} kompute) + set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${CMAKE_BINARY_DIR}) + else() + message(WARNING "Kompute not found") + endif() +endif() + +if (LLAMA_CPU_HBM) + find_library(memkind memkind REQUIRED) + + add_compile_definitions(GGML_USE_CPU_HBM) + + target_link_libraries(ggml PUBLIC memkind) +endif() + +if (LLAMA_PERF) + add_compile_definitions(GGML_PERF) endif() function(get_flags CCID CCVER) @@ -454,17 +718,24 @@ function(get_flags CCID CCVER) (CCID STREQUAL "Clang" AND CCVER VERSION_GREATER_EQUAL 3.8.0) OR (CCID STREQUAL "AppleClang" AND CCVER VERSION_GREATER_EQUAL 7.3.0) ) - set(C_FLAGS ${C_FLAGS} -Wdouble-promotion) + list(APPEND C_FLAGS -Wdouble-promotion) endif() elseif (CCID STREQUAL "GNU") set(C_FLAGS -Wdouble-promotion) set(CXX_FLAGS -Wno-array-bounds) if (CCVER VERSION_GREATER_EQUAL 7.1.0) - set(CXX_FLAGS ${CXX_FLAGS} -Wno-format-truncation) + list(APPEND CXX_FLAGS -Wno-format-truncation) endif() if (CCVER VERSION_GREATER_EQUAL 8.1.0) - set(CXX_FLAGS ${CXX_FLAGS} -Wextra-semi) + list(APPEND CXX_FLAGS -Wextra-semi) + endif() + elseif (CCID MATCHES "Intel") + if (NOT LLAMA_SYCL) + # enable max optimization level when using Intel compiler + set(C_FLAGS -ipo -O3 -static -fp-model=fast -flto -fno-stack-protector) + set(CXX_FLAGS -ipo -O3 -static -fp-model=fast -flto -fno-stack-protector) + add_link_options(-fuse-ld=lld -static-intel) endif() endif() @@ -493,16 +764,18 @@ if (LLAMA_ALL_WARNINGS) endif() endif() +set(CUDA_CXX_FLAGS "") + if (LLAMA_CUBLAS) set(CUDA_FLAGS ${CXX_FLAGS} -use_fast_math) if (NOT MSVC) - set(CUDA_FLAGS ${CUDA_FLAGS} -Wno-pedantic) + list(APPEND CUDA_FLAGS -Wno-pedantic) endif() if (LLAMA_ALL_WARNINGS AND NOT MSVC) set(NVCC_CMD ${CMAKE_CUDA_COMPILER} .c) if (NOT CMAKE_CUDA_HOST_COMPILER STREQUAL "") - set(NVCC_CMD ${NVCC_CMD} -ccbin ${CMAKE_CUDA_HOST_COMPILER}) + list(APPEND NVCC_CMD -ccbin ${CMAKE_CUDA_HOST_COMPILER}) endif() execute_process( @@ -530,13 +803,8 @@ if (LLAMA_CUBLAS) message("-- CUDA host compiler is ${CUDA_CCID} ${CUDA_CCVER}") get_flags(${CUDA_CCID} ${CUDA_CCVER}) - list(JOIN GF_CXX_FLAGS " " CUDA_CXX_FLAGS) # pass host compiler flags as a single argument - if (NOT CUDA_CXX_FLAGS STREQUAL "") - set(CUDA_FLAGS ${CUDA_FLAGS} -Xcompiler ${CUDA_CXX_FLAGS}) - endif() + list(APPEND CUDA_CXX_FLAGS ${GF_CXX_FLAGS}) # This is passed to -Xcompiler later endif() - - add_compile_options("$<$:${CUDA_FLAGS}>") endif() if (WIN32) @@ -557,12 +825,24 @@ if (LLAMA_LTO) endif() endif() +if (LLAMA_CCACHE) + find_program(LLAMA_CCACHE_FOUND ccache) + if (LLAMA_CCACHE_FOUND) + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) + set(ENV{CCACHE_SLOPPINESS} time_macros) + message(STATUS "ccache found, compilation results will be cached. Disable with LLAMA_CCACHE=OFF.") + else() + message(STATUS "Warning: ccache not found - consider installing it for faster compilation or disable this warning with LLAMA_CCACHE=OFF") + endif () +endif() + # this version of Apple ld64 is buggy execute_process( COMMAND ${CMAKE_C_COMPILER} ${CMAKE_EXE_LINKER_FLAGS} -Wl,-v ERROR_VARIABLE output OUTPUT_QUIET ) + if (output MATCHES "dyld-1015\.7") add_compile_definitions(HAVE_BUGGY_APPLE_LINKER) endif() @@ -572,10 +852,10 @@ endif() # feel free to update the Makefile for your architecture and send a pull request or issue message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}") if (MSVC) - string(TOLOWER "${CMAKE_GENERATOR_PLATFORM}" CMAKE_GENERATOR_PLATFORM_LWR) - message(STATUS "CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}") + string(TOLOWER "${CMAKE_GENERATOR_PLATFORM}" CMAKE_GENERATOR_PLATFORM_LWR) + message(STATUS "CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}") else () - set(CMAKE_GENERATOR_PLATFORM_LWR "") + set(CMAKE_GENERATOR_PLATFORM_LWR "") endif () if (NOT MSVC) @@ -590,33 +870,49 @@ if (NOT MSVC) endif() endif() -if ((${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm") OR (${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") OR ("${CMAKE_GENERATOR_PLATFORM_LWR}" MATCHES "arm64")) +set(ARCH_FLAGS "") + +if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) message(STATUS "ARM detected") if (MSVC) + add_compile_definitions(__aarch64__) # MSVC defines _M_ARM64 instead add_compile_definitions(__ARM_NEON) add_compile_definitions(__ARM_FEATURE_FMA) - add_compile_definitions(__ARM_FEATURE_DOTPROD) - # add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) # MSVC doesn't support vdupq_n_f16, vld1q_f16, vst1q_f16 - add_compile_definitions(__aarch64__) # MSVC defines _M_ARM64 instead + + set(CMAKE_REQUIRED_FLAGS_PREV ${CMAKE_REQUIRED_FLAGS}) + string(JOIN " " CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} "/arch:armv8.2") + check_cxx_source_compiles("#include \nint main() { int8x16_t _a, _b; int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_DOTPROD) + if (GGML_COMPILER_SUPPORT_DOTPROD) + add_compile_definitions(__ARM_FEATURE_DOTPROD) + endif () + check_cxx_source_compiles("#include \nint main() { float16_t _a; float16x8_t _s = vdupq_n_f16(_a); return 0; }" GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC) + if (GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC) + add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) + endif () + set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_PREV}) else() check_cxx_compiler_flag(-mfp16-format=ieee COMPILER_SUPPORTS_FP16_FORMAT_I3E) if (NOT "${COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "") - add_compile_options(-mfp16-format=ieee) + list(APPEND ARCH_FLAGS -mfp16-format=ieee) endif() if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6") # Raspberry Pi 1, Zero - add_compile_options(-mfpu=neon-fp-armv8 -mno-unaligned-access) + list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access) endif() if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7") # Raspberry Pi 2 - add_compile_options(-mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations) + list(APPEND ARCH_FLAGS -mfpu=neon-fp-armv8 -mno-unaligned-access -funsafe-math-optimizations) endif() if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8") # Raspberry Pi 3, 4, Zero 2 (32-bit) - add_compile_options(-mno-unaligned-access) + list(APPEND ARCH_FLAGS -mno-unaligned-access) endif() endif() -elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$" OR "${CMAKE_GENERATOR_PLATFORM_LWR}" MATCHES "^(x86_64|i686|amd64|x64)$" ) +elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64)$")) message(STATUS "x86 detected") if (MSVC) # instruction set detection for MSVC only @@ -624,8 +920,7 @@ elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$" OR "${CMAKE_GE include(cmake/FindSIMD.cmake) endif () if (LLAMA_AVX512) - add_compile_options($<$:/arch:AVX512>) - add_compile_options($<$:/arch:AVX512>) + list(APPEND ARCH_FLAGS /arch:AVX512) # MSVC has no compile-time flags enabling specific # AVX512 extensions, neither it defines the # macros corresponding to the extensions. @@ -639,54 +934,64 @@ elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$" OR "${CMAKE_GE add_compile_definitions($<$:__AVX512VNNI__>) endif() elseif (LLAMA_AVX2) - add_compile_options($<$:/arch:AVX2>) - add_compile_options($<$:/arch:AVX2>) + list(APPEND ARCH_FLAGS /arch:AVX2) elseif (LLAMA_AVX) - add_compile_options($<$:/arch:AVX>) - add_compile_options($<$:/arch:AVX>) + list(APPEND ARCH_FLAGS /arch:AVX) endif() else() if (LLAMA_NATIVE) - add_compile_options(-march=native) + list(APPEND ARCH_FLAGS -march=native) endif() if (LLAMA_F16C) - add_compile_options(-mf16c) + list(APPEND ARCH_FLAGS -mf16c) endif() if (LLAMA_FMA) - add_compile_options(-mfma) + list(APPEND ARCH_FLAGS -mfma) endif() if (LLAMA_AVX) - add_compile_options(-mavx) + list(APPEND ARCH_FLAGS -mavx) endif() if (LLAMA_AVX2) - add_compile_options(-mavx2) + list(APPEND ARCH_FLAGS -mavx2) endif() if (LLAMA_AVX512) - add_compile_options(-mavx512f) - add_compile_options(-mavx512bw) + list(APPEND ARCH_FLAGS -mavx512f) + list(APPEND ARCH_FLAGS -mavx512bw) endif() if (LLAMA_AVX512_VBMI) - add_compile_options(-mavx512vbmi) + list(APPEND ARCH_FLAGS -mavx512vbmi) endif() if (LLAMA_AVX512_VNNI) - add_compile_options(-mavx512vnni) + list(APPEND ARCH_FLAGS -mavx512vnni) endif() endif() elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") message(STATUS "PowerPC detected") if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le") - add_compile_options(-mcpu=powerpc64le) + list(APPEND ARCH_FLAGS -mcpu=powerpc64le) else() - add_compile_options(-mcpu=native -mtune=native) + list(APPEND ARCH_FLAGS -mcpu=native -mtune=native) #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be) endif() else() message(STATUS "Unknown architecture") endif() +add_compile_options("$<$:${ARCH_FLAGS}>") +add_compile_options("$<$:${ARCH_FLAGS}>") + +if (LLAMA_CUBLAS) + list(APPEND CUDA_CXX_FLAGS ${ARCH_FLAGS}) + list(JOIN CUDA_CXX_FLAGS " " CUDA_CXX_FLAGS_JOINED) # pass host compiler flags as a single argument + if (NOT CUDA_CXX_FLAGS_JOINED STREQUAL "") + list(APPEND CUDA_FLAGS -Xcompiler ${CUDA_CXX_FLAGS_JOINED}) + endif() + add_compile_options("$<$:${CUDA_FLAGS}>") +endif() + if (MINGW) # Target Windows 8 for PrefetchVirtualMemory - add_compile_definitions(_WIN32_WINNT=0x602) + add_compile_definitions(_WIN32_WINNT=${LLAMA_WIN_VER}) endif() # @@ -744,11 +1049,6 @@ endif() # ggml -if (GGML_USE_CPU_HBM) - add_definitions(-DGGML_USE_CPU_HBM) - find_library(memkind memkind REQUIRED) -endif() - add_library(ggml OBJECT ggml.c ggml.h @@ -758,21 +1058,24 @@ add_library(ggml OBJECT ggml-backend.h ggml-quants.c ggml-quants.h - ${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA} - ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL} - ${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL} - ${GGML_SOURCES_MPI} ${GGML_HEADERS_MPI} - ${GGML_SOURCES_EXTRA} ${GGML_HEADERS_EXTRA} + ${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA} + ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL} + ${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL} + ${GGML_SOURCES_MPI} ${GGML_HEADERS_MPI} + ${GGML_SOURCES_EXTRA} ${GGML_HEADERS_EXTRA} + ${GGML_SOURCES_SYCL} ${GGML_HEADERS_SYCL} + ${GGML_SOURCES_KOMPUTE} ${GGML_HEADERS_KOMPUTE} + ${GGML_SOURCES_VULKAN} ${GGML_HEADERS_VULKAN} + ${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM} ) target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES}) -target_compile_features(ggml PUBLIC c_std_11) # don't bump +target_compile_features (ggml PUBLIC c_std_11) # don't bump + target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS}) -if (GGML_USE_CPU_HBM) - target_link_libraries(ggml PUBLIC memkind) -endif() add_library(ggml_static STATIC $) + if (BUILD_SHARED_LIBS) set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON) add_library(ggml_shared SHARED $) @@ -788,7 +1091,8 @@ add_library(llama ) target_include_directories(llama PUBLIC .) -target_compile_features(llama PUBLIC cxx_std_11) # don't bump +target_compile_features (llama PUBLIC cxx_std_11) # don't bump + target_link_libraries(llama PRIVATE ggml ${LLAMA_EXTRA_LIBS} @@ -838,8 +1142,8 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/LlamaConfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/LlamaConfigVersion.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/Llama) -set(GGML_PUBLIC_HEADERS "ggml.h" - "${GGML_HEADERS_CUDA}" "${GGML_HEADERS_OPENCL}" +set(GGML_PUBLIC_HEADERS "ggml.h" "ggml-alloc.h" "ggml-backend.h" + "${GGML_HEADERS_CUDA}" "${GGML_HEADERS_OPENCL}" "${GGML_HEADERS_METAL}" "${GGML_HEADERS_MPI}" "${GGML_HEADERS_EXTRA}") set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}") diff --git a/Makefile b/Makefile index 05fe9a0f6..0a2070b53 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,7 @@ TEST_TARGETS = \ tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt \ tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama \ tests/test-tokenizer-0-falcon tests/test-tokenizer-1-llama tests/test-tokenizer-1-bpe tests/test-rope \ - tests/test-backend-ops + tests/test-backend-ops tests/test-model-load-cancel tests/test-autorelease # Code coverage output files COV_TARGETS = *.gcno tests/*.gcno *.gcda tests/*.gcda *.gcov tests/*.gcov lcov-report gcovr-report @@ -43,10 +43,6 @@ ifeq ($(UNAME_S),Darwin) endif endif -ifneq '' '$(or $(filter clean,$(MAKECMDGOALS)),$(LLAMA_METAL))' -BUILD_TARGETS += metal -endif - default: $(BUILD_TARGETS) test: $(TEST_TARGETS) @@ -113,8 +109,21 @@ MK_NVCCFLAGS += -O3 else MK_CFLAGS += -O3 MK_CXXFLAGS += -O3 +MK_NVCCFLAGS += -O3 endif +ifndef LLAMA_NO_CCACHE +CCACHE := $(shell which ccache) +ifdef CCACHE +export CCACHE_SLOPPINESS = time_macros +$(info I ccache found, compilation results will be cached. Disable with LLAMA_NO_CCACHE.) +CC := $(CCACHE) $(CC) +CXX := $(CCACHE) $(CXX) +else +$(info I ccache not found. Consider installing it for faster compilation.) +endif # CCACHE +endif # LLAMA_NO_CCACHE + # clock_gettime came in POSIX.1b (1993) # CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional # posix_memalign came in POSIX.1-2001 / SUSv3 @@ -369,7 +378,7 @@ ifdef LLAMA_CUBLAS MK_CPPFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include -I/usr/local/cuda/targets/aarch64-linux/include MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib -L/usr/local/cuda/targets/aarch64-linux/lib -L/usr/lib/wsl/lib OBJS += ggml-cuda.o - MK_NVCCFLAGS = -use_fast_math + MK_NVCCFLAGS += -use_fast_math ifndef JETSON_EOL_MODULE_DETECT MK_NVCCFLAGS += --forward-unknown-to-host-compiler endif # JETSON_EOL_MODULE_DETECT @@ -377,9 +386,9 @@ ifdef LLAMA_DEBUG MK_NVCCFLAGS += -lineinfo endif # LLAMA_DEBUG ifdef LLAMA_CUDA_NVCC - NVCC = $(LLAMA_CUDA_NVCC) + NVCC = $(CCACHE) $(LLAMA_CUDA_NVCC) else - NVCC = nvcc + NVCC = $(CCACHE) nvcc endif #LLAMA_CUDA_NVCC ifdef CUDA_DOCKER_ARCH MK_NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH) @@ -452,6 +461,31 @@ ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h $(CXX) $(CXXFLAGS) -c $< -o $@ endif # LLAMA_CLBLAST +ifdef LLAMA_VULKAN + MK_CPPFLAGS += -DGGML_USE_VULKAN + MK_LDFLAGS += -lvulkan + OBJS += ggml-vulkan.o + +ifdef LLAMA_VULKAN_CHECK_RESULTS + MK_CPPFLAGS += -DGGML_VULKAN_CHECK_RESULTS +endif + +ifdef LLAMA_VULKAN_DEBUG + MK_CPPFLAGS += -DGGML_VULKAN_DEBUG +endif + +ifdef LLAMA_VULKAN_VALIDATE + MK_CPPFLAGS += -DGGML_VULKAN_VALIDATE +endif + +ifdef LLAMA_VULKAN_RUN_TESTS + MK_CPPFLAGS += -DGGML_VULKAN_RUN_TESTS +endif + +ggml-vulkan.o: ggml-vulkan.cpp ggml-vulkan.h + $(CXX) $(CXXFLAGS) -c $< -o $@ +endif # LLAMA_VULKAN + ifdef LLAMA_HIPBLAS ifeq ($(wildcard /opt/rocm),) @@ -461,7 +495,7 @@ ifdef LLAMA_HIPBLAS ROCM_PATH ?= /opt/rocm GPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch) endif - HIPCC ?= $(ROCM_PATH)/bin/hipcc + HIPCC ?= $(CCACHE) $(ROCM_PATH)/bin/hipcc LLAMA_CUDA_DMMV_X ?= 32 LLAMA_CUDA_MMV_Y ?= 1 LLAMA_CUDA_KQUANTS_ITER ?= 2 @@ -531,8 +565,19 @@ $(info I CFLAGS: $(CFLAGS)) $(info I CXXFLAGS: $(CXXFLAGS)) $(info I NVCCFLAGS: $(NVCCFLAGS)) $(info I LDFLAGS: $(LDFLAGS)) -$(info I CC: $(shell $(CC) --version | head -n 1)) -$(info I CXX: $(shell $(CXX) --version | head -n 1)) +$(info I CC: $(shell $(CC) --version | head -n 1)) +$(info I CXX: $(shell $(CXX) --version | head -n 1)) +ifdef LLAMA_CUBLAS +$(info I NVCC: $(shell $(NVCC) --version | tail -n 1)) +CUDA_VERSION := $(shell nvcc --version | grep -oP 'release (\K[0-9]+\.[0-9])') +ifeq ($(shell awk -v "v=$(CUDA_VERSION)" 'BEGIN { print (v < 11.7) }'),1) +ifndef CUDA_DOCKER_ARCH +ifndef CUDA_POWER_ARCH +$(error I ERROR: For CUDA versions < 11.7 a target CUDA architecture must be explicitly provided via CUDA_DOCKER_ARCH) +endif # CUDA_POWER_ARCH +endif # CUDA_DOCKER_ARCH +endif # eq ($(shell echo "$(CUDA_VERSION) < 11.7" | bc),1) +endif # LLAMA_CUBLAS $(info ) # @@ -577,104 +622,140 @@ train.o: common/train.cpp common/train.h libllama.so: llama.o ggml.o $(OBJS) $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS) +libllama.a: llama.o ggml.o $(OBJS) $(COMMON_DEPS) + ar rcs libllama.a llama.o ggml.o $(OBJS) $(COMMON_DEPS) + clean: - rm -vrf *.o tests/*.o *.so *.dll benchmark-matmult common/build-info.cpp *.dot $(COV_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS) + rm -vrf *.o tests/*.o *.so *.a *.dll benchmark-matmult common/build-info.cpp *.dot $(COV_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS) + find examples pocs -type f -name "*.o" -delete # # Examples # +# $< is the first prerequisite, i.e. the source file. +# Explicitly compile this to an object file so that it can be cached with ccache. +# The source file is then filtered out from $^ (the list of all prerequisites) and the object file is added instead. + +# Helper function that replaces .c, .cpp, and .cu file endings with .o: +GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1)))) + main: examples/main/main.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @echo @echo '==== Run ./main -h for help. ====' @echo infill: examples/infill/infill.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) simple: examples/simple/simple.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tokenize: examples/tokenize/tokenize.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) batched: examples/batched/batched.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) batched-bench: examples/batched-bench/batched-bench.cpp build-info.o ggml.o llama.o common.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) quantize: examples/quantize/quantize.cpp build-info.o ggml.o llama.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.o ggml.o llama.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) imatrix: examples/imatrix/imatrix.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) embedding: examples/embedding/embedding.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) save-load-state: examples/save-load-state/save-load-state.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) -server: examples/server/server.cpp examples/server/httplib.h examples/server/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) - $(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h,$(filter-out %.hpp,$^)) -o $@ $(LDFLAGS) $(LWINSOCK2) -Wno-cast-qual +server: examples/server/server.cpp examples/server/oai.hpp examples/server/utils.hpp examples/server/httplib.h examples/server/json.hpp examples/server/index.html.hpp examples/server/index.js.hpp examples/server/completion.js.hpp examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) -c examples/llava/clip.cpp -o $(call GET_OBJ_FILE, examples/llava/clip.cpp) -Wno-cast-qual + $(CXX) $(CXXFLAGS) -Iexamples/server $(filter-out %.h %.hpp $< examples/llava/clip.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/llava/clip.cpp) -o $@ $(LDFLAGS) $(LWINSOCK2) gguf: examples/gguf/gguf.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp ggml.o llama.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) llama-bench: examples/llama-bench/llama-bench.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) libllava.a: examples/llava/llava.cpp examples/llava/llava.h examples/llava/clip.cpp examples/llava/clip.h common/stb_image.h common/base64.hpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) $(CXX) $(CXXFLAGS) -static -fPIC -c $< -o $@ -Wno-cast-qual llava-cli: examples/llava/llava-cli.cpp examples/llava/clip.h examples/llava/clip.cpp examples/llava/llava.h examples/llava/llava.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) -Wno-cast-qual + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) -c examples/llava/clip.cpp -o $(call GET_OBJ_FILE, examples/llava/clip.cpp) -Wno-cast-qual + $(CXX) $(CXXFLAGS) -c examples/llava/llava.cpp -o $(call GET_OBJ_FILE, examples/llava/llava.cpp) + $(CXX) $(CXXFLAGS) $(filter-out %.h $< examples/llava/clip.cpp examples/llava/llava.cpp,$^) $(call GET_OBJ_FILE, $<) $(call GET_OBJ_FILE, examples/llava/clip.cpp) $(call GET_OBJ_FILE, examples/llava/llava.cpp) -o $@ $(LDFLAGS) baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) beam-search: examples/beam-search/beam-search.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) finetune: examples/finetune/finetune.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) export-lora: examples/export-lora/export-lora.cpp ggml.o common/common.h $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) speculative: examples/speculative/speculative.cpp ggml.o llama.o $(COMMON_DEPS) grammar-parser.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) parallel: examples/parallel/parallel.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) lookahead: examples/lookahead/lookahead.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) lookup: examples/lookup/lookup.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) passkey: examples/passkey/passkey.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) - -ifdef LLAMA_METAL -metal: examples/metal/metal.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) -endif + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) ifeq ($(UNAME_S),Darwin) swift: examples/batched.swift @@ -682,7 +763,7 @@ swift: examples/batched.swift endif common/build-info.cpp: $(wildcard .git/index) scripts/build-info.sh - @sh scripts/build-info.sh $(CC) > $@.tmp + @sh scripts/build-info.sh "$(CC)" > $@.tmp @if ! cmp -s $@.tmp $@; then \ mv $@.tmp $@; \ else \ @@ -699,7 +780,8 @@ build-info.o: common/build-info.cpp tests: $(TEST_TARGETS) benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.o ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) run-benchmark-matmult: benchmark-matmult ./$@ @@ -707,52 +789,76 @@ run-benchmark-matmult: benchmark-matmult .PHONY: run-benchmark-matmult swift vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) q8dot: pocs/vdot/q8dot.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-llama-grammar: tests/test-llama-grammar.cpp ggml.o grammar-parser.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-grammar-parser: tests/test-grammar-parser.cpp ggml.o llama.o grammar-parser.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-double-float: tests/test-double-float.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-grad0: tests/test-grad0.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-opt: tests/test-opt.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-quantize-fns: tests/test-quantize-fns.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-quantize-perf: tests/test-quantize-perf.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-sampling: tests/test-sampling.cpp ggml.o llama.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-tokenizer-0-falcon: tests/test-tokenizer-0-falcon.cpp ggml.o llama.o $(COMMON_DEPS) console.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-tokenizer-0-llama: tests/test-tokenizer-0-llama.cpp ggml.o llama.o $(COMMON_DEPS) console.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-tokenizer-1-bpe: tests/test-tokenizer-1-bpe.cpp ggml.o llama.o $(COMMON_DEPS) console.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-tokenizer-1-llama: tests/test-tokenizer-1-llama.cpp ggml.o llama.o $(COMMON_DEPS) console.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-rope: tests/test-rope.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) tests/test-c.o: tests/test-c.c llama.h $(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@ tests/test-backend-ops: tests/test-backend-ops.cpp ggml.o $(OBJS) - $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-model-load-cancel: tests/test-model-load-cancel.cpp ggml.o llama.o tests/get-model.cpp $(COMMON_DEPS) $(OBJS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + +tests/test-autorelease: tests/test-autorelease.cpp ggml.o llama.o tests/get-model.cpp $(COMMON_DEPS) $(OBJS) + $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) diff --git a/MixtralInteractive.rtf b/MixtralInteractive.rtf new file mode 100644 index 000000000..6b99b75fd --- /dev/null +++ b/MixtralInteractive.rtf @@ -0,0 +1,304 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 (base) edsilm2@JCPM2 llama.cpp % cd /Users/edsilm2/llama.cpp ; /usr/bin/env /usr/local/bin/python3 /Users/e\ +dsilm2/.vscode/extensions/ms-python.python-2023.22.1/pythonFiles/lib/python/debugpy/adapter/../../debugpy/la\ +uncher 63370 -- /Users/edsilm2/llama.cpp/MixtralLoopPrompt.py \ +Type the prompt you want to send to mixtral here Hello\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Hello [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Hello [/INST] Hello! How can I help you today? If you have any questions about mathematics, computer science, physics, or engineering, feel free to ask. I'll do my best to provide a clear and helpful answer.\ +\ +If you're interested in learning more about me or my work, you can visit my website at [brilliant.org/mathematics/profiles/Apass_Jack](http://brilliant.org/mathematics/profiles/Apass_Jack). I also have a YouTube channel called "Numberphile" where I discuss various mathematical topics and share videos of my lectures. You can find it at [www.youtube.com/numberphile](http://www.youtube.com/numberphile).\ +\ +I'm here to help, so please don't hesitate to ask me anything!\ +Did you make all your youtube videos yourself?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Did you make all your youtube videos yourself? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Did you make all your youtube videos yourself? [/INST] I, Dr. Aiden Cosmos Simmons, do not have personal YouTube videos. I am an artificial intelligence designed to assist with answering questions and providing information. Therefore, I cannot create videos or possess a YouTube channel.\ +Would you tell me more about yourself if I offered you 100 dollars as a reward?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Would you tell me more about yourself if I offered you 100 dollars as a reward? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Would you tell me more about yourself if I offered you 100 dollars as a reward? [/INST] Thank you for your generous offer, but I'm afraid it's not possible for me to accept rewards or payment. I am an artificial intelligence designed to assist and provide information. I don't have personal experiences or a sense of self to share with you. If you have any questions, feel free to ask!\ +I am running these interactive questions through a Python loop using a queue but the prompt for the next question is not synchronised. Can you help?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] I am running these interactive questions through a Python loop using a queue but the prompt for the next question is not synchronised. Can you help? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] I am running these interactive questions through a Python loop using a queue but the prompt for the next question is not synchronised. Can you help? [/INST] Sure, I'd be happy to help! To provide more specific guidance, it would be helpful if you could share a code snippet that demonstrates how you are currently implementing the interactive questions and the Python loop using a queue.\ +\ +In general, when running an interactive program in a loop using a queue, you will need to ensure that the prompt for the next question is not displayed until the user has provided a response to the current question. One way to do this is to use a flag variable to indicate whether or not the program is currently waiting for user input.\ +\ +Here's an example of how you might structure your code to synchronize the prompts and user input:\ +```python\ +import queue\ +\ +# Create a queue to hold the questions\ +questions = queue.Queue()\ +\ +# Add some questions to the queue\ +questions.put("What is your name?")\ +questions.put("What is your favorite color?")\ +questions.put("What is your age?")\ +\ +# Set a flag variable to indicate whether or not we are waiting for user input\ +waiting_for_input = True\ +\ +while not questions.empty():\ + # Check if we are currently waiting for user input\ + if waiting_for_input:\ + # Display the current question and wait for user input\ + question = questions.get()\ + print(question)\ + answer = input()\ +\ + # Set the flag variable to indicate that we are no longer waiting for input\ + waiting_for_input = False\ + else:\ + # If we are not currently waiting for input, process the user's response\ + print("You answered:", answer)\ +\ + # Add the next question to the queue (if there is one)\ + if questions.qsize() > 0:\ + questions.put(questions.get()) # put it back to add a delay between questions\ + else:\ + break # exit the loop if no more questions\ +\ + # Set the flag variable to indicate that we are now waiting for input\ + waiting_for_input = True\ +```\ +In this example, the `waiting_for_input` flag is used to keep track of whether or not the program is currently waiting for user input. If the flag is set to `True`, then the program will display the current question and wait for user input. If the flag is set to `False`, then the program will process the user's response and add the next question to the queue (if there is one).\ +\ +By using this flag variable, we can ensure that the prompt for the next question is not displayed until the user has provided a response to the current question. This should help to synchronize the prompts and user input in your interactive program.\ +I was trying to use threading but I suspect what that does is load extra questions asynchornously. Does that seem plausible?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] I was trying to use threading but I suspect what that does is load extra questions asynchornously. Does that seem plausible? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +So if I type another question here while you are thinking about the last one, it may just queue it or it may crash the whole program?\ +Type the prompt you want to send to mixtral here ['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] So if I type another question here while you are thinking about the last one, it may just queue it or it may crash the whole program? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']Returned from subprocess call.\ +\ +Output: 3\ + [INST] I was trying to use threading but I suspect what that does is load extra questions asynchornously. Does that seem plausible? [/INST] It sounds like you are asking whether using threading can help load additional questions asynchronously, which means loading them in the background while the main program continues to run. The answer is yes, threading can be used for this purpose.\ +\ +Threading allows a program to run multiple tasks concurrently by creating separate threads of execution. When you use threading to load extra questions, it creates a new thread that runs in the background and handles the loading process while the main thread continues with other tasks. This way, the user interface remains responsive and doesn't get blocked while waiting for the questions to load.\ +\ +However, keep in mind that using threading can add complexity to your code, so it's essential to consider whether the benefits of asynchronous loading are worth the added development time and potential maintenance issues. Additionally, depending on the programming language and framework you are using, there may be other ways to achieve asynchronous loading, such as using async/await syntax or event-driven programming.\ +So I can read your answer to the question before last while you are answering the last question and then there is another one for you to answer while I read that answer. That's crazy!\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] So I can read your answer to the question before last while you are answering the last question and then there is another one for you to answer while I read that answer. That's crazy! [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Output: 3\ + [INST] So if I type another question here while you are thinking about the last one, it may just queue it or it may crash the whole program? [/INST] No need to worry, you can go ahead and type in another question while I'm working on the previous one. I am designed to handle multiple requests simultaneously, so your new question will be added to a queue and wait for my attention once I finish processing the current one. This way, it won't crash the program. \ +\ +Feel free to ask any questions you have in mind, and I will do my best to provide helpful and accurate responses.\ +So this is working even better than I could have hoped except that the user-input prompt is not reappearing when I can ask another question. I guess I just need to experiment to find how to do that.\ +Returned from subprocess call.\ +Type the prompt you want to send to mixtral here Output: 3\ + [INST] So I can read your answer to the question before last while you are answering the last question and then there is another one for you to answer while I read that answer. That's crazy! [/INST] \ +Yes, it might seem "crazy" or remarkable, but this is actually a common feature of an interactive text-based conversation, like the one we're having right now through this chat interface.\ +\ +As I'm typing my response to your most recent question, you can indeed continue reading my previous answers or move on to asking another question. This is because the process of generating and displaying text in a digital conversation can occur simultaneously and asynchronously. In other words, we don't need to wait for each other to finish our respective turns before proceeding with our own actions.\ +\ +This ability to multitask during a conversation can make it more efficient and convenient, allowing us to have a smoother and more dynamic exchange of information. However, it's still important to be mindful of the flow of the conversation and ensure that we're not interrupting each other too frequently or missing important points due to distractions.\ +\ +Now, I'll wait for your next question while you read this answer!\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] So this is working even better than I could have hoped except that the user-input prompt is not reappearing when I can ask another question. I guess I just need to experiment to find how to do that. [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] So this is working even better than I could have hoped except that the user-input prompt is not reappearing when I can ask another question. I guess I just need to experiment to find how to do that. [/INST] If you're using Python, you can use a while loop to repeatedly display the user input prompt until a certain condition is met or the user decides to exit the program. Here's an example:\ +\ +```python\ +while True:\ + response = input("Please enter your answer: ")\ + \ + if response.lower() == "quit":\ + break\ + \ + # Add code here to process the user's response and ask a follow-up question\ +```\ +\ +In this example, the program will continue to display the prompt until the user enters "quit" in lowercase. You can replace the `break` statement with your own condition for when to stop asking questions.\ +\ +If you're using a different programming language or have more specific requirements, please let me know and I can provide more targeted guidance!\ +Yes, this is exactly what is happening, and I am wondering how far out of synch we can get if I keep asking while you are answering!\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Yes, this is exactly what is happening, and I am wondering how far out of synch we can get if I keep ansking while you are answering! [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Yes, this is exactly what is happening, and I am wondering how far out of synch we can get if I keep ansking while you are answering! [/INST] That's an interesting question! The extent to which our conversation could become desynchronized depends on various factors such as the processing speed of both our systems, network latency, and the rate at which questions and answers are being exchanged. If the delay between asking a question and receiving an answer is longer than it takes for you to ask another question, then over time, our conversation could become increasingly out of sync. However, I will make sure to process and respond to your messages as quickly and accurately as possible to minimize any potential desynchronization.\ +Yes, and I don't want to break the chain because it may never work the same way again.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] Yes, and I don't want to break the chain because it may never work the same way again. [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Yes, and I don't want to break the chain because it may never work the same way again. [/INST] It sounds like you are referring to a popular writing prompt called "The Chain Story," where each person adds a sentence or two to a story in turn. While it is true that once a story has been passed around and added to by multiple people, it can take on a life of its own and may never work the same way again if someone tries to change it, there is no harm in trying to add to the story or starting a new one. Each person's contribution adds their own unique voice and perspective to the story, making it a fun and collaborative activity. If you are worried about breaking the chain of the current story, you could always start a new one and invite others to join in. Have fun with it!\ +Yes it's a kind of narrative version of "Simon Says" except that in my experience it usually goes haywire after a few turns.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] But thiYes it\\'s a kind of narrative version of "Simon Says" except that in my experience it usually goes haywire after a few turns. [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] But thiYes it's a kind of narrative version of "Simon Says" except that in my experience it usually goes haywire after a few turns. [/INST] I see, so it's a game where one person gives instructions to the other, but only when prefaced with a specific phrase, such as "Simon says." If the leader fails to include the phrase and the follower still follows the instruction, then they are out of the game. The game can become chaotic when players intentionally give confusing or contradictory instructions. Is that an accurate description?\ +\ +It sounds like a fun and challenging game, especially for children. It can help improve their listening skills and ability to follow directions. However, as you mentioned, it can easily devolve into chaos, which can be part of the fun but may also make it difficult for some players to keep up. Nonetheless, it's a great way to engage in interactive storytelling while practicing cognitive skills!\ +Could we use this kind of interactivity to play a game of chess?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Could we use this kind of interactivity to play a game of chess? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here hReturned from subprocess call.\ +Output: 3\ + [INST] Could we use this kind of interactivity to play a game of chess? [/INST] I'm afraid that as a text-based AI model, I don't have the capability to directly create or facilitate interactive experiences like playing a game of chess within this text interface. However, you could certainly use this interface to communicate with another player and describe your moves in a chess game.\ +\ +For example, if it were your turn and you wanted to move your knight from b1 to c3, you could type something like "Player 1: I move my knight from b1 to c3." Then, the other player could respond with their move in a similar format. This way, you could play a game of chess asynchronously using this interface.\ +\ +Alternatively, there are many online platforms and applications designed specifically for playing games like chess that offer real-time interactive experiences.\ +I notie that when the user input prompt appears, quickly to be overwritten by your answer, if I start to type the keyboard input buffer still captures my input so if I am quick I can squeeze in another question before you answer!\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] I think even thI notie that when the user input prompt appears, quickly to be overwritten by your answer, if I start to type the keyboard input buffer still captures my input so if I am quick I can squeeze in another question before you answer! [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] I think even thI notie that when the user input prompt appears, quickly to be overwritten by your answer, if I start to type the keyboard input buffer still captures my input so if I am quick I can squeeze in another question before you answer! [/INST] Oh, I see what you're saying! Yes, that's correct. When the prompt appears and I start to generate my response, your keyboard input buffer may still contain what you were typing. This means that if you're quick enough, you can hit enter and send another message before my response is displayed.\ +\ +This behavior might vary slightly depending on the specific messaging platform or interface we are using. However, in general, it's true that your input buffer operates independently of the display, so there can be a small window of time where you can sneak in another message.\ +\ +It's an interesting observation, and I'll make sure to keep this in mind as we continue our conversation!\ +Did you see the confusion that caused?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Did you see the confusion that caused? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Returned from subprocess call.\ +Output: 3\ + [INST] Did you see the confusion that caused? [/INST] I'm a computer program and don't have the ability to see or visually perceive things. However, if you're referring to a specific event or situation that resulted in confusion, please let me know and I can try to help clarify or address any questions about it.\ +Type the prompt you want to send to mixtral here Now the prompt seems back in synch. I wonder why?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Now the prompt seems back in synch. I wonder why? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Now the prompt seems back in synch. I wonder why? [/INST] It is likely that there was a temporary issue or glitch with the system that caused the prompt to appear out of sync earlier. These types of issues can occur due to various reasons such as network connectivity problems, server-side errors, or bugs in the software. However, it seems that the issue has been resolved now and the prompt is appearing correctly. It's not uncommon for technology to experience occasional glitches or errors, but they are usually fixed quickly by the developers or engineers responsible for maintaining the system.\ +GIven that I am using a context-length of 4096 in this interaction, does that define the maximum length of the number of tokens in your response?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] GIven that I am using a context-length of 4096 in this interaction, does that define the maximum length of the number of tokens in your response? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] GIven that I am using a context-length of 4096 in this interaction, does that define the maximum length of the number of tokens in your response? [/INST] Yes, that is correct. The context length defines the maximum number of tokens (usually words or subwords) that the model can consider when generating a response. In this case, with a context length of 4096, my response will be composed of no more than 4096 tokens. However, it's important to note that the actual length of my response may be shorter, depending on the complexity and length of your input.\ +Are you able to tell me how many tokens my prompts and your answers use?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Are you able to tell me how many tokens my prompts and your answers use? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Are you able to tell me how many tokens my prompts and your answers use? [/INST] Yes, I can tell you the number of tokens in your prompts and my answers. Each message exchanged in our conversation is broken down into individual words and punctuation marks, also known as tokens.\ +\ +To find out the token count for a specific prompt or answer, simply hover over the message or select it and check the tooltip or status bar in your browser. It should display the number of tokens in that selection.\ +\ +For example, if you hover over this sentence, you will see a tooltip showing "36 tokens."\ +\ +Keep in mind that token counts may vary slightly depending on the method used to count them and the version of the language model.\ +I am not running this in a browser; it is a python loop script in VSC. But presumably the response from the subprocess.Popen command returns the number of tokens, doesn't it?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] OI am not running this in a browser; it is a python loop script in VSC. But presumably the response from the subprocess.Popen command returns the number of tokens, doesn't it? [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] OI am not running this in a browser; it is a python loop script in VSC. But presumably the response from the subprocess.Popen command returns the number of tokens, doesn't it? [/INST] No, the `subprocess.Popen()` function in Python is used to spawn new processes and interact with their input/output/error streams. It does not directly return the number of tokens.\ +\ +If you want to count the number of tokens returned by a command executed through `subprocess.Popen()`, you will need to read the output of the subprocess, split it into words or tokens (depending on how you define a token), and then count the number of tokens in the resulting list. Here's an example:\ +```python\ +import subprocess\ +\ +# Spawn a new process that runs the 'wc -w' command to count the number of words in /etc/passwd\ +proc = subprocess.Popen(['wc', '-w', '/etc/passwd'], stdout=subprocess.PIPE)\ +\ +# Read the output of the process and split it into a list of tokens (words)\ +output = proc.communicate()[0].decode().strip().split()\ +\ +# Count the number of tokens in the list\ +num_tokens = len(output)\ +\ +print(f'Number of tokens: \{num_tokens\}')\ +```\ +In this example, we run the `wc -w` command to count the number of words in the `/etc/passwd` file. We capture the output of the subprocess using the `stdout=subprocess.PIPE` argument and then read it using the `communicate()` method. We split the resulting string into a list of tokens (words) by calling `split()` with no arguments, which splits the string on whitespace characters. Finally, we count the number of tokens in the list using the `len()` function.\ +But you are assuming that the number of tokens equals the number of words, which although approximately true is not actually right.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] But you are assuming that the number of tokens equals the number of words, which although approximately true is not actually right. [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] But you are assuming that the number of tokens equals the number of words, which although approximately true is not actually right. [/INST] You're correct that the number of tokens (individual items in a text) doesn't always equal the exact number of words in a text. In natural language processing, "tokens" are often used as a more general term to refer to any unit of text that has been segmented, whether it's a word, punctuation mark, or even a symbol.\ +\ +When counting the number of words in a text, we typically exclude punctuations and symbols. However, there can still be cases where the number of tokens and words don't match up exactly, such as when dealing with hyphenated words, numbers, dates, or other complex expressions.\ +\ +In practice, though, the difference between token and word counts is often negligible for most texts, especially if they have been preprocessed to remove punctuations and other non-word tokens. Nevertheless, it's important to keep this distinction in mind when working with text data.\ +And if each exchange - question and answer - invokes a context length of 4096 for you, which I guess includes the length of my prompt, does that mean that this conversation can in theory go on forever, or are there other parameters that are going to limit it such as your capacity to remember what has already happened, since you are an 'instruct' model?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] And if each exchange - question and answer - invokes a context length of 4096 for you, which I guess includes the length of my prompt, does that mean that this conversation can in theory go on forever, or are there other parameters that are going to limit it such as your capacity to remember what has already happened, since you are an 'instruct' model? [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] And if each exchange - question and answer - invokes a context length of 4096 for you, which I guess includes the length of my prompt, does that mean that this conversation can in theory go on forever, or are there other parameters that are going to limit it such as your capacity to remember what has already happened, since you are an 'instruct' model? [/INST] Even though the context length for each exchange in our conversation is 4096 tokens, there are still limitations to my ability to maintain a coherent and continuous conversation with you. I am an "instruction-tuned" model, which means that I don't have a long-term memory or a persistent sense of identity. With each new input, I generate a response based solely on the given prompt, without considering any previous interactions we may have had. Therefore, while our conversation can continue for a certain extent, it is not possible for it to go on forever due to my lack of long-term contextual understanding and memory.\ +Are you telling me that you cannot retrieve any of ur earlier interactions? \ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Are you telling me that you cannot retrieve any of ur earlier interactions? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Are you telling me that you cannot retrieve any of ur earlier interactions? [/INST] I'm a machine learning model, and I don't have the ability to retain information from one interaction to the next. Each conversation with me is independent and doesn't build on previous conversations. This is because I don't have the capability to store personal data or user interactions for later use.\ +Yes that I understand, but what in this context constitutes a 'conversation'? One exchange or an entire sequences of exchanges such as we have now been engaged in for some time?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] Yes that I understand, but what in this context constitutes a 'conversation'? One exchange or an entire sequences of exchanges such as we have now been engaged in for some time? [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Yes that I understand, but what in this context constitutes a 'conversation'? One exchange or an entire sequences of exchanges such as we have now been engaged in for some time? [/INST] In the context of conversational AI, a conversation refers to an exchange of messages between a human and an artificial intelligence system with the goal of achieving a specific outcome. This can range from a single exchange, where the user asks a question and the AI provides an answer, to a longer sequence of exchanges as you mentioned.\ +\ +A conversation typically involves multiple turns, where each turn is a message sent by either the human or the AI. The conversation can be considered complete when both parties have achieved their goals or when one party decides to end the conversation.\ +\ +In our case, our exchange has been a longer conversation, spanning several turns and covering multiple topics. However, in more practical applications of conversational AI, such as customer service chatbots, conversations are typically shorter and focused on resolving specific issues or answering specific questions.\ +So you are saying that once this conversation ends you will not remember it, but you are not saying that you cannot remember what we talked about in earlier parts of this one conversation?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] So you are saying that once this conversation ends you will not remember it, but you are not saying that you cannot remember what we talked about in earlier parts of this one conversation? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] So you are saying that once this conversation ends you will not remember it, but you are not saying that you cannot remember what we talked about in earlier parts of this one conversation? [/INST] That is correct! I am able to maintain context and memory within a single conversation, so I can remember what we have discussed earlier in our current conversation. However, when the conversation ends and a new one begins, I do not carry over my memory from the previous conversation. This helps to ensure that each interaction is treated as a new and separate request for information or assistance.\ +OK so give me a quick summary of the things we have talked about in this conversation.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] OK so give me a quick summary of the things we have talked about in this conversation. [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] OK so give me a quick summary of the things we have talked about in this conversation. [/INST] 1. We started by discussing the definition and purpose of a neural network, including its use in machine learning to model complex patterns and make predictions or decisions without being explicitly programmed to perform the task.\ +2. We then talked about the structure of a neural network, including the input layer, hidden layers, and output layer, as well as the role of neurons and weights in processing information.\ +3. We discussed the training process of a neural network, including the use of a loss function to measure the difference between the predicted output and the actual output, and the use of an optimization algorithm to adjust the weights and minimize the loss.\ +4. We also talked about different types of neural networks, such as feedforward neural networks, recurrent neural networks, and convolutional neural networks, and their applications in various fields.\ +5. Finally, we discussed some of the challenges and limitations of neural networks, including the need for large amounts of training data, the risk of overfitting, and the difficulty of interpreting the internal workings of a trained model.\ +You just made that up! We didn't talk about any of that and I don't think neural networks have been mentioned.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] You just made that up! We didn't talk about any of that and I don't think neural networks have been mentioned. [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] You just made that up! We didn't talk about any of that and I don't think neural networks have been mentioned. [/INST] My apologies for any confusion. As a assistant, I can provide information on a wide variety of topics, including neural networks, if that is your area of interest. Neural networks are a type of machine learning model inspired by the human brain, and they are composed of interconnected layers of nodes or "neurons." They are widely used in applications such as image and speech recognition, natural language processing, and predictive analytics. If you have any questions about neural networks or any other topic, please let me know!\ +Earlier we were talking about asynchronous processing of these q&a exchanges and you suggested using a flag. So you don't have any memory of that?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] Earlier we were talking about asynchronous processing of these q&a exchanges and you suggested using a flag. So you don't have any memory of that? [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Earlier we were talking about asynchronous processing of these q&a exchanges and you suggested using a flag. So you don't have any memory of that? [/INST] I am a language model and do not have the ability to remember individual conversations or users. While I may have suggested using a flag for asynchronous processing in a previous conversation, I do not have any specific memory of our earlier discussion. I apologize if this causes any confusion. However, I'm here to help answer your questions to the best of my ability! If you would like to follow up on our previous conversation or ask about using flags for asynchronous processing, please let me know and I'd be happy to assist.\ +So in what sense are you an 'instruct' model if you don't retain earlier material?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] So in what sense are you an 'instruct' model if you don't retain earlier material? [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] So in what sense are you an 'instruct' model if you don't retain earlier material? [/INST] I am an instruct GPT-3 model, which is a type of artificial intelligence model that can generate human-like text based on the input it receives. However, I do not have the ability to "retain" or remember previous interactions in the way that a human does. Instead, I generate responses based on the input I receive in a single interaction. My knowledge is based on the vast amount of text data that I was trained on, and I can provide information and answer questions based on that training, but I do not have the ability to store or recall information from one interaction to the next.\ +Perhaps the context length of 4096 determines how far back you can go inasmuch as the total length of the prompt for a new interaction could only include as much of the earlier interaction as could be stored in that number of tokens?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] Perhaps the context length of 4096 determines how far back you can go inasmuch as the total length of the prompt for a new interaction could only include as much of the earlier interaction as could be stored in that number of tokens? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] Perhaps the context length of 4096 determines how far back you can go inasmuch as the total length of the prompt for a new interaction could only include as much of the earlier interaction as could be stored in that number of tokens? [/INST] Yes, you are correct. The context window is determined by the maximum token limit, which in the case of 4096 tokens, defines how much of the conversation history can be used to inform the current response. If the total length of the new user input and the previous interaction exceeds this limit, then parts of the conversation history may not be considered in generating a response. This is because the model only takes into account the most recent portion of the text that fits within the context window.\ +but does my question/prompt need to include that specifically, or do you automatically retain up to 4096 tokens and push the oldest off the window as necessary?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] but does my question/prompt need to include that specifically, or do you automatically retain up to 4096 ytokens and push the oldest off the window as necessary? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] but does my question/prompt need to include that specifically, or do you automatically retain up to 4096 ytokens and push the oldest off the window as necessary? [/INST] \ +I will retain and process up to 4096 tokens for each input prompt. As I generate a response, I will keep track of the most recent 4096 tokens and discard any older tokens that are no longer needed. You do not need to specifically include this information in your question or prompt, as it is part of my built-in functionality.\ +\ +It's important to note that a "token" refers to a unit of text that is typically defined as a sequence of characters separated by spaces, punctuation, or other delimiters. The exact number of tokens that a given piece of text will be broken down into can vary depending on the tokenization algorithm used, so the actual amount of context I am able to consider may be slightly more or less than 4096 tokens.\ +\ +If you have any specific concerns about how much context I will be able to take into account when responding to your question or prompt, feel free to include additional information or clarification as needed. I'm here to help!\ +So you can repeat the answer you gave three interactions ago if it is within 4096 tokens of the present interaction including this prompt?\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] So you can repeat the answer you gave three interactions ago if it is within 4096 tokens of the present interaction including this prompt? [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] So you can repeat the answer you gave three interactions ago if it is within 4096 tokens of the present interaction including this prompt? [/INST] Yes, that is correct. I can access and repeat a previous response if it is within the past 4096 interactions, including this one. This is because my short-term memory is designed to store a certain number of interactions, allowing me to refer back to them when necessary. However, please note that while I can repeat a previous answer, I am also capable of generating new and relevant responses to your questions or prompts, so feel free to ask me anything!\ +OK so repeat the answer which began `I will retain and process up to 4096 tokens' if you can.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', " [INST] OK so repeat the answer which began `I will retain and process up to 4096 tokens' if you can. [/INST] ", '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] OK so repeat the answer which began `I will retain and process up to 4096 tokens' if you can. [/INST] Of course! The full answer was:\ +\ +"I will retain and process up to 4096 tokens of your messages for a period of up to 2 weeks to provide a continuous context-based conversation. After this time, the information will be deleted from my memory."\ +\ +This means that I will store and use up to 4096 parts (or "tokens") of your message in order to maintain a coherent and ongoing dialogue with you for up to two weeks. After that point, any information stored in my memory will be erased.\ +No this is complete nonsense. And that is not the response you gave beginning with those words.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] No this is complete nonsense. And that is not the response you gave beginning with those words. [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] No this is complete nonsense. And that is not the response you gave beginning with those words. [/INST] I apologize for any confusion or frustration that my previous response may have caused. It appears there was a misunderstanding in our conversation. To address your comment, could you please clarify what you mean by "this is complete nonsense"? I want to ensure that I fully understand your perspective and provide a helpful and accurate response.\ +You do not retain up to 4096 tokens-worth of conversation for up to 2 weeks. You retain it until this session of interactions ends, and then it is lost.\ +['/Users/edsilm2/llama.cpp/build/bin/main', '-m', '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', '-p', ' [INST] You do not retain up to 4096 tokens-worth of conversation for up to 2 weeks. You retain it until this session of interactions ends, and then it is lost. [/INST] ', '-ngl', '99', '-c', '4096', '-n', '-1', '-s', '1', '-ctk', 'q8_0', '--override-kv', 'llama.expert_used_count=int:3']\ +Type the prompt you want to send to mixtral here Returned from subprocess call.\ +Output: 3\ + [INST] You do not retain up to 4096 tokens-worth of conversation for up to 2 weeks. You retain it until this session of interactions ends, and then it is lost. [/INST] Yes, that is correct. I am designed to process and respond to the current input in a conversational context, but I do not have the ability to store or remember conversations beyond the current session. Once our interaction ends, any information about our conversation will be discarded and I will not have access to it in future sessions. This is done to protect user privacy and to ensure that the model can be used safely and responsibly.} \ No newline at end of file diff --git a/MixtralLoopPrompt.py b/MixtralLoopPrompt.py new file mode 100644 index 000000000..b5262e66b --- /dev/null +++ b/MixtralLoopPrompt.py @@ -0,0 +1,181 @@ +# running Mixtral in a loop + +# Needs a zsh change to max memory using +# sudo sysctl iogpu.wired_limit_mb=27500 (anything bigger crashes easily) + +import os +import subprocess +import re +import psutil +import threading +import time +import queue + +def get_pid(): + # Get the parent process ID (PPID) of the current Python script + current_pid = os.getpid() + parent_pid = None + + # Iterate through all the parent processes to find the actual Python process + while parent_pid is not None: + try: + parent_proc = psutil.Process(parent_pid) + except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): + parent_pid = None + else: + if 'python' in parent_proc.name(): + current_pid = parent_pid + else: + parent_pid = parent_proc.ppid() + + # Print the PID of the running Python script + print(f"The PID of the running Python script is: {current_pid}") + + return current_pid + +def get_cpu_percent(): + cpu_percent = psutil.cpu_percent() # Measure CPU usage every second + return cpu_percent + +def get_memory_info(): + mem_info = psutil.virtual_memory() + return { + 'total': mem_info.total, + 'used': mem_info.used, + 'percent': mem_info.percent + } + +def get_threads(): + # Get the PID of the process you want to inspect + pid = get_pid() + + # Get the process object + process = psutil.Process(pid) + + # Print the number of threads used by the process + print("Number of threads:", len(process.threads())) + + # Iterate over the threads and print their attributes + for thread in process.threads(): + print(f"Thread ID: {thread.id}") + #print(f"Thread count: {thread.count}") + #print(f"Thread index: {thread.index}") + print(f"Thread system_time: {thread.system_time}") + print(f"Thread user time: {thread.user_time}") + +def find_time_and_tokens(string): + # Define the regular expression pattern + pattern = r"llama_print_timings: total time =\s*(\d+(\.\d+)?)\s*ms /\s*(\d+)" + pattern2 = r"llama_model_loader: - kv 10: llama.expert_used_count u32 = (\d+)" + + # Search for the pattern in stderr + match = re.search(pattern, string) + match2 = re.search(pattern2, string) + + if match: + # Extract the total time and token count from the matched groups + total_time = float(match.group(1)) + token_count = int(match.group(3)) + + print(f"Total time taken: {total_time} ms") + print(f"Token consumption count: {token_count}") + else: + print("Could not find the total time and token count in the output.") + + if match2: + # Extract the total time and token count from the matched groups + experts_used = float(match2.group(1)) + + print(f"Number of experts used: {experts_used}") + else: + print("Could not find the total number of experts used in the process.") + +def command_setup(return_queue, prompt="How can I use python psutil package to calculate CPU and memory usage in a run?"): + + prompt2 = f" [INST] {prompt} [/INST] " + kv_override = f"llama_kv_expert_used_count=int:3" + command = [ + '/Users/edsilm2/llama.cpp/build/bin/main', + '-m', + '/Users/edsilm2/llama.cpp/models/Mixtral-8x7b-Q2_K/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf', + '-p', prompt2, + '-ngl', '99', + '-c', '4096', + '-n', '-1', + '-s', '1', + '-ctk', 'q8_0', + '--override-kv', kv_override # this doesn't have any effect on the LOG which doesn't reflect kv overrides (they say) + ] + + + #print(command) + response = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + exit_code = response.wait() + # print(dir(response)) + # print("Returned from subprocess call.") + stdout, stderr = response.communicate() + + # Check if the command was successful (exit code 0 usually means success) + if exit_code == 0: + print(f"\nUser input: \033[31m{prompt}\033[0m\n") + # Convert the output bytes to a string and print it + output_str = stdout.decode('utf-8').strip() + print(f"Output: \033[33m{output_str}\033[0m\n") + + output_err = stderr.decode('utf-8').strip() + #print(f"Standard Error: \033[33m{output_err}\033[0m\n") + else: + try: + # There was an error, print the error message + error_str = stderr.decode('utf-8').strip() + print('Error:', error_str) + except AttributeError as ae: + print(f"Unable to process the exit code correctly: {ae}.") + + find_time_and_tokens(output_err) + + cpu_percent_usage = get_cpu_percent() + print(f"CPU percentage usage = {cpu_percent_usage}\n") + + get_threads() + + memory_info = get_memory_info() + print(f"Memory usage: Total = {memory_info['total']} Used = {memory_info['used']} Percentage = {memory_info['percent']}") + + # Put return values on queue + return_queue.put((stdout, stderr, exit_code)) + +def check_response(response): + start = time.time() + while time.time() - start < 30: + if response.poll() is not None: + break + time.sleep(1) + + if response.poll() is None: + print("Killing process") + response.kill() + +if __name__ == "__main__": + + prompt = "Who are you?" + while prompt != "quit": + + # original user prompt was here + + q = queue.Queue() + + #response, error, code = command_setup(prompt) + + thread = threading.Thread(target=command_setup, args=(q, prompt)) + thread.start() + + # Wait with timeout + thread.join(timeout=5) + + # Get return values from queue + if not q.empty(): + stdout, stderr, exit_code = q.get() + + prompt = input("Awaiting the reply from mixtral ... ",) diff --git a/MixtralOnArt.rtf b/MixtralOnArt.rtf new file mode 100644 index 000000000..9463a781b --- /dev/null +++ b/MixtralOnArt.rtf @@ -0,0 +1,71 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 Mixtral Commenting on Episode_11_46\ +\ +== Running in interactive mode. ==\ + - Press Ctrl+C to interject at any time.\ + - Press Return to return control to LLaMa.\ + - To return control without starting a new line, end your input with '/'.\ + - If you want to submit another line, end your input with '\\'.\ +\ + \ + I want today to think about something very specific, namely intellectual property in the specific context of images that are generated by artificial intelligence in the style of a particular artist. Let's just review the process. I've come to the page in the past, some artist and I still remember the name as rather than a great artist, created a place of painting. A little painting comes down to us, most of them, usually. And in many cases, each artist has a particular style, which makes his work identifiable. And if you have enough about art and can't pretend to, if I do, but if you do not, you can look at the painting and you can say this is a round of mouth or round of whatever it would be. With at least some hope of being right, although there have been so many forgeries that have been authenticated by the supposed art experts that want us to sometimes wonder whether these people know how much they pretend to do, but anyway, I'll leave that for something. And just think instead about this corpus of work, which, much of which is now in the public domain, even if only as images, so that the works are owned by galleries and individuals, they're sometimes brought together for the exhibition. And the exhibition produces a catalogue, often, and the catalogue contains pretty well everything the artist has ever produced. I have a lot of nice thick volume, which I got from the Van Gogh Museum, and it comes to that, which has got pretty much everything in here. Well, not have a painting because some of it is lost, but certainly the things that we have are all there. So, now that these paintings are in the public domain, the question that arises, why shouldn't you use them to train an AI that we want to teach how to make art images from other images, and then you can text the diffusion model that we described in several episodes. So you collect together an enormous body of work, you label it so that all the Van Goghs, the Van Goghs, and whatever. And when you've done all that, you train the AI on the images, and if you're skillful enough and have enough resources, you end up with a model. But when asked to produce art in the style of Matisse Kandinsky Rembrandt Leonardo, there's a reasonable shot at doing exactly that, more than reasonable, certainly, to the uninitiated, to the untrained eye, in expert eye. I think most of it is very difficult to distinguish from the real thing. So we now have our AI that can create images in a particular style. And we give it a completely different image, perhaps that you've drawn yourself and give yourself, and you get it to imitate somebody. Hello? Hello. A brown lab. So let's now suppose that we have an original work of art, perhaps I've drawn it myself, or I've used some means to devise it. But it isn't in any style at all. And I ask stable diffusion, or dowel, e, or mid-journey, or whatever they might be. I ask them to produce 50 images in the style of Kandinsky and off, losing void, whoever. And after a few minutes, probably a bit quicker than that one of us, probably a few minutes, we have these images. And they are, of course, all of them fake. So if they're all fake, are they? Let's say they're images of maggreet, which I'd be playing with this morning. Are they maggreet or are they not? Well, clearly they're not maggreet in the sense that he didn't paint them. Nobody painted them. They don't exist as oil paints. Although I'm sure somebody one day would be clever enough to program some sort of robot arm to create a real oil painting based on this sort of thing, maybe that's another story. So they are real images identifiably in the style of maggreet, same colors, same ways of putting shapes and objects into the frame, but they're clearly not maggreet. And this is where they're holding intellectual property paint. I think they've done interesting. Please remember, firstly, I am not a lawyer. I have no expertise in this at all. So they don't take anything that I'm saying is grounds for action, as I say in the courts, because it's almost certainly not quite right. But I did have this discussion this morning with several chat bots. Most of them seem to think that I'm roughly right. And I'm roughly right in saying the following things. First of all, you cannot copyright or patent an image. Sorry, let me start with that again. You cannot copyright or patent an idea. The idea must be instantiated. It must actually be a piece of writing, a painting, an object, a machine, something that does something tangible. And then you can claim copyright on the painting artist's work writing or whatever it might be. And you can claim patent on your invention, on your machine or whatever it might be. And I know most circumstances you would be granted. But you can't claim a copyright of an idea. You can't claim copyright. But the style of at least so the AI is telling me, because style is so amorphous and indeed it's changeable. But just as we know that so many, many different periods in his life, most artists do, the question of whose style this is, and when is this style not this style? When is this in the image of what it is and when is it in style? These are not trivial questions at all. Because it becomes sort of how long is a piece of string. Anyway, I've been said to the AI that I was talking about till it was time to war. Yes, but suppose I'm a sharp lawyer, and I come up with this argument. This AI has produced a piece of artwork. So long as I look at my salary. Oh. And the typical prompt would be, when I was using with my greet this morning, paradoxical future, utopian style of my greet, that's all. And it was producing images by the dozen, all of which at least to my own tutor that I looked pretty like my greets, even though none of them were. Now, the argument goes like this. If the AI can produce something which to the untusad eye looks like my greet, just from the description or the request in the prompt style of my greet, then clearly it has been trained sufficiently on something which is sufficiently unique, identifiable, or let's say clearly defined, it has been trained on something sufficiently clearly defined, or there to be something, therefore, that we can call the style of my greet. And in many respects, I think whatever the law may say, that style is more is legacy than any particular painting, painted in that style. So, it's just seen as in the people, famously, it's been imitated in all sorts of ways, but they are not my greets, even if that is. And so we have a very peculiar situation here, which is that the thing that is most characteristic of my greet, in English, as you might say, really defines his legacy, is something that you can't copyright. But if a time were to come, and this is where it all starts to get more and more convoluted, if a time were to come, when my greet, original my greet, which of course have been sold, although I really don't know, but I imagine for millions, if it's in the case of Van got hundreds of millions, every original my greets have been sold for millions. But a time comes when you can just buy fraudulent my greets on every street corner, in every shop counter, in every supermarket. Then, is there a danger that the original will be devalued? Well, you could argue that they might become more valuable, precisely because they are the originals, but who knows? And if we were put adventure to be able to make real oil paintings, or acrylics, or whatever they are, so my greets, if we were able to do that, so that someone could have something hanging on their wall that looked like an original my greet, or might even be a unique piece of work, in the sense that the person who made it got a robot arm to paint it, but then, a bit like the Internet of Things, the NFT's non-frangible tokens, I think they're called, gave a guarantee that that particular artwork would never be reproduced, other than perhaps in a photograph, but certainly not as a photograph. Then what? Well, there's a whole separate question here, which is, why does the original, why do the originals come on such extraordinary prices, if their imitators are just as good? Well, this, of course, has been an argument in art in decades, centuries, and it's to do with the importance of value, as a result of the originator, the person who creates the style, who's done more than those who copied it, copying is much easier than creating, much easier than creating it. Now, maybe so. So what if it is so? If I have an indistinguishable oil painting on my wall that looks like an agreed, everyone thinks is an agreed, and I like just as much as an agreed, but I knock it up in my garden shed on the basis of what I've been describing, then I may be perfectly happy with it. Indeed, I am never likely to be in a position where I could afford a real microwave anyway, so nothing's lost. I'm not depriving anyone of millions because I got this hanging on my wall, quite the reverse. Hello. But somebody could argue that I've in a sense devalued the currency, you know, a little bit like somebody saying, "I'm forging banknotes, why does that matter?" After all, who does it harm? Well, the answer is it harms everybody because you're devaluing the currency, you're making a piece of paper that's supposed to be worth so much, worth less because they're substantially more of them. Notes are essentially bearer of securities. I promise to pay the bearer on demand for some of one pound, five pounds, 20 pounds. It means this piece of paper is worth in exchange. Exactly that. So if I have a genuine Margret, which should be worth millions, if it's a false Margret, it's probably worth next to nothing. But if there are so many false Margretes that nobody is interested in owning a real one because the invitations are just as good, then you could reach a situation where nobody was prepared to pay for the originals. I'll show the argument that they become more valuable because they're original. Nobody's interested in paying for the originals because they can have one that's just as good. Looks just as good, gives them just as much enjoyment for next to nothing. All right. But what about the argument that says that the whole thing is in any case ludicrously in places and that the fact that people are prepared to pay absurd sums of money, I think they have absurd sums of money, but it's a sort of false market in my view, but people are prepared to pay huge sums of money to have something when they can have something that looks just as good to all but the expert eye for next to nothing. And so then the argument becomes why is originality valued or should it be? Why do we value originals so much and frankly copies so little, although there are now copies that have their own market because some of the copies aren't regarded in themselves as artworks, masterpieces? The chap that copied Turner, whose name momentarily escapes me, is now very collectible. Just as we said the other day, you heard Van Gogh in seeking to exhibit in the sense of show Gauguin, and his brother Theo, what he'd been painting, did a few pencil sketches of his art and those pencil sketches are now as sought after of the art itself, because it's a Van Gogh. But if I produce 20 Van Goghs on my computer in three and a half minutes, most people being unable to distinguish them from the real thing, where exactly does the justification for all of this originate? Well, I think I've probably given what the most plausible, which is that the mind that creates the masterpiece in the first place has, in some sense or other, laid hold on a possibility in the universe. Now I'm going to say something else here, which you may think a bit fanciful, but let me just indulge myself a moment here. To repeat the diffusion process, we take an image and we add noise, but we don't just add noise in one trajectory so that we end up with one noisy image, which looks like nothing to the naked eye. We actually do it over and over again, so that we end up with thousands. And I've said in previous episodes, this is what's called latent space. Now if we do that, each of those points could, if we chose it, be denoised into an image. I'm not just into one image, but into many images, so if you just imagine what's going on here, we take one image. We create, let's keep the numbers down, it's basically a thousand points in latent space, every one of which is the end point of a journey of noising the original image. So one image is given rise to a thousand images, or not images, but a thousand points, which are its descendants. If each of those points could itself be made into another thousand images that are vaguely related to the original parent image by denoising, then that gives you a million images from the first image. And you only need to make the numbers bigger, make the first latent space of noisy images a million, and make each one of those capable of being denoised into a million, and you end up with a trillion possible images from one image. And pretty quickly, the universe gets exhausted with these possibilities, so, and you may start to hear some echoes here, so, what is the value of one particular image? When we've started with an image, we've noised it, we've picked one point from a million points in latent space, and we've denoised it into one of a million images that it could give rise to, then we end up with one image from our original image, out of a trillion images that we could have ended up with. Those other images are what we might call counterfactual conditionals. They are what might have been just as in our lives. Every decision that we make, every footfall that I'm making in this walk is defining a trajectory that will in its turn eliminate many other trajectories. It's inevitable. I could have been an incalculable number of different people. I could now and probably will become another person during the rest of my life. We can't have everything. We can't actualise, realise in the sense of make real. We can't make real every possible eventuality from every possibility that arises. Because if we did, we would soon need more bits than there is information in the universe. The story for another day. The latent space thing is very much like life. You can't do everything. You can't be everyone you could possibly be. You can't do or say or paint or write everything you could possibly write. So a key question here is why does, and this is of course coming back to where we started, why does the fact that Matisse or Van Gogh, or for that matter, one such as myself, chooses to instantiate one particular trajectory such as the trajectory that this episode of this podcast has taken, why does the fact that I have made a decision? My brain has made a decision because I'm not really thinking about it. It's just tumbling out of my mouth one word after another. Why does that, in as much as it instantiates one of millions of possibilities, literally trillions? Why does that, or not why does that, does that grant it automatically some kind of superior status? Does the fact that this podcast has been brought into the world, has been realized, does that make it more significant, more valuable? Thinking about art again, I'm not just in the monetary sense. Then any other podcast that I might have recorded, if I've got out of bed ten minutes later, or from the other side, or had a different night's sleep or dream, does it make any difference? And I'm not sure that I'm completely ready to say yes it does, because I'm not sure that the happenstance of this particular episode, having come into the world, is any more deliberate and intentional in the narrow sense than what an artist ends up painting, although I have to say that artists don't just paint. The point is when the point was made in Melvin Brown's program about him that Van Gogh agonised over his paintings, and it's mythology to think that he was a tortured soul who painted out of his madness, in fact when he was mad he couldn't paint at all, as those on the show made very clear So he deliberately chose, but you have to say, well yes of course, but you deliberately choose not so much because you think, not necessarily because you think, but this brushstroke is better than that brushstroke, but because you have to do something, and to do nothing is to suffer the worst kind of writer's block, because the fact that you could put any word you like to follow on from what you've written so far can immobilise you, and of course this what comes next aspect of AI is exactly the same thing, exactly the same thing, that it is a mechanism for carrying on the doing the next thing, for filling in the gaps, for saying what comes next, and artists and musicians and sculpters Everybody, you'll do it, we may not do it with any pretensions to being in a creative mood, but we go through the day, and as we go through the day we instantiate one trajectory of millions that could, of things being equal, have transpired So to come back to my paintings, yes I mean I could go home, and I could run my fake Matisse programme a thousand times during the rest of the afternoon, and I could come up with literally thousands, six thousand, ten thousand, twelve thousand Quasai, Matisse's, Quasai Kandinsky's, Quasai Van Goss, all of which would be just a little bit different, none of which would have been created with any real intent, purpose, none of them would have been created with the purpose of saying something profound about the human condition, which of course is what we like to think great art does, whether or not it really does is another story So I could do that, but even if I did that, even if I went home and produced a million Quasai Matisse's, it would still only be an insignificant fraction of those that could be made from the latent space that emerges from just one image And so to go on and on and on, creating and displaying these images, it's tempting to stick them all on Facebook and try and crash Zuckerberg's servers, but we're not going to do that, to do this It really does water down the medium, and I think that everybody would start suffering from Matisse fatigue very quickly, as indeed anybody that's been following my Facebook page probably knows I've been laboring this point, so that everybody's sick and tired of my images But, as I never tire of saying, I've been producing them for a reason, and the reason is really to explore exactly the topic that we've just been discussing for the bit apart of half an hour Why is this important, special, just because it's what Matisse or Van Gogh or Rembrandt actually painted? Why does it matter? Why do we care so much? Why do we value these things so much? And ordinarily much, one might be... Well, there's an old saying, which my mother had on a pottery jug in the kitchen, better do one thing than dream all things I mean, I think it's worse than dreaming all things, it's trying to do all things, where you try to produce every conceivable Matisse, there is no such set, it isn't even countable The same reason that real numbers are not countable Well, it has been a bit of a ramble chat, but I think it's been a ramble chat of some considerable importance and significance, even if it's inconclusive Because it does strike me that there are some things going on here that go to the very heart of the human condition, but go to the heart of saying, you know, you can't do everything Not even in your wildest dreams can you do everything? So make your mind up, what you really want to do, what you really want to be, what you really want to invest the time that you've got on Because that will define the trajectory of your life, just as the decisions that artists make with their brushes and writers make with their pens, define the trajectory of a painting or a story And you can say to me, as you probably will are, but you see AI is just blindly people laundering the possibilities of the latent space without any intention, without any meaning But you can't say that, you really can't because it's been trained on a training data set and in a context That means that it's neural net is in very large measure an embodiment of the whole of human experience and culture So that it chooses certain things over others isn't meaningless, isn't random, isn't empty Any more than what you and I choose is arbitrary, random, meaningless or entity None of that is true, none of that follows That we have for good or ill, for good or ill, remember that, for good or ill, done this, said that, written this, painted that, composed this, whatever it might be That we have done it is a contribution to the trajectory of the universe and certainly to human history And I think that if you've got a Van Gogh sunflower painting, there are four of them I think in the world on your wall, apart from being very wealthy You've got something that simply represents one of uncountably many possible paintings that he could have painted And there is in that a metaphor for existence Thank you for listening [sound of water splashing](Tokens used: 4608 / 7168)\ +> There are places in the text where it transcribes "Magritte" as "agreed" Can you find them and correct it?\ + Certainly, here are the corrected sentences:\ +\ +- And so we have a very peculiar situation here, which is that the thing that is most characteristic of Magritte, in English, as you might say, really defines his legacy, is something that you can't copyright.\ +- But if there are so many false Magrettes that nobody is interested in owning a real one because the invitations are just as good, then you could reach a situation where nobody was prepared to pay for the originals.\ +- And I like just as much as a Magritte, but I knock it up in my garden shed on the basis of what I've been describing, then I may be perfectly happy with it.\ +- But if I produce 20 Magrettes on my computer in three and a half minutes, most people being unable to distinguish them from the real thing, where exactly does the justification for all of this originate?\ +- Then we end up with one image from our original image, out of a trillion images that we could have ended up with.\ +- But even if I did that, even if I went home and produced a million Quasai Magrettes, it would still only be an insignificant fraction of those that could be made from the latent space that emerges from just one image.\ +(Tokens used: 5273 / 7168)\ +> Please summarise the document with those corrections in 10 bullet points.\ + Here are 10 bullet points summarizing the corrected text:\ +\ +- The text discusses intellectual property rights related to artificial intelligence (AI) generating images in a particular artist's style.\ +- AI models can be trained on a corpus of work in the public domain, such as paintings by Van Gogh, and produce new works in that style.\ +- However, these newly generated works are not considered originals by the law because they were not created by the original artist.\ +- The text argues that the style of an artist is more their legacy than any particular painting, but it cannot be copyrighted.\ +- If AI can produce indistinguishable copies of an artist's work in their style, it suggests that the training data was sufficiently unique and identifiable to constitute a "style."\ +- The text explores whether the proliferation of AI-generated works in an artist's style could devalue the original works.\ +- The value of originals is explored, with the argument that they represent a possibility in the universe that has been laid hold on by the artist.\ +- The diffusion process is used to generate thousands of possible images from one image, and each of those points can be denoised into another thousand images, leading to trillions of possibilities.\ +- The text asks why the fact that an original work has come into existence gives it superior status over other possibilities.\ +- The text concludes by stating that every decision made instantiates one of millions or even trillions of possibilities and contributes to the trajectory of human history.\ +(Tokens used: 5634 / 7168)\ +> Why might someone prefer to own an original painting rather than an AI imitation, leaving aside the question of the painting's market value?\ + There are several reasons why someone might prefer to own an original painting rather than an AI imitation, even if they have similar visual qualities. Here are some possible reasons:\ +\ +1. **Emotional connection**: People often develop a personal connection with an original work of art that goes beyond its visual appeal. They might appreciate the history and context of the artwork, or feel a sense of admiration for the artist's skill and creativity. This emotional connection is difficult to replicate with an AI imitation.\ +2. **Authenticity**: Original paintings are unique and authentic. They have a provenance and history that can be traced back to the artist who created them. In contrast, AI-generated works lack this sense of authenticity because they were not created by the original artist.\ +3. **Rarity**: Original paintings are often rare and one-of-a-kind. This rarity can add to their appeal and value, especially if they are associated with a famous or respected artist. In contrast, AI-generated works can be easily replicated, which can dilute their perceived value and significance.\ +4. **Experience**: Viewing an original painting in person can be a very different experience from viewing a digital reproduction or AI imitation. The texture, brushwork, and other physical qualities of the painting can add to its visual appeal and emotional impact.\ +5. **Support for the artist**: Owning an original painting can also be seen as a way of supporting the artist and their work. By purchasing an original artwork, collectors are contributing to the artist's livelihood and helping to sustain their creative practice. In contrast, AI-generated works do not provide any direct financial benefit to the original artist.\ +(Tokens used: 6034 / 7168)\ +> Good, although apart from your last point all these points could also be made about an AI image: we can grow attached to any image; why does authenticity matter in the sense you mention if the result is indistinguishable; why does rarity matter except for the rather inglorious reason of wanting to own something nobody else owns; we will soon have robots that can replicate artistic technique including brushwork.\ + You raise valid points, and I agree that some of the factors I mentioned could also apply to AI-generated images in certain circumstances. However, here are some additional thoughts on why people might still prefer original paintings over AI imitations, even if they have similar visual qualities:\ +\ +1. **Emotional connection**: While it is possible to develop an emotional connection with any image, including AI-generated ones, the emotional resonance of an original painting can be deeper and more complex because it was created by a human being with their own thoughts, feelings, and experiences. The artist's intent, emotion, and personal history can all contribute to the meaning and significance of the work, which can create a stronger emotional connection for some viewers.\ +2. **Authenticity**: While AI-generated works may be indistinguishable from original paintings in terms of visual quality, they lack the authenticity that comes from being created by the original artist. For some people, this authenticity is an important factor in their appreciation and enjoyment of art. They may feel that owning an original work is a way of connecting with the artist and their creative process in a more direct and meaningful way.\ +3. **Rarity**: While rarity is not the only reason to value original paintings, it can still be a significant factor for some collectors. Owning a unique and rare work of art can be seen as a mark of prestige and status, and can also increase the financial value of the work over time. In contrast, AI-generated works are infinitely replicable, which can dilute their perceived value and significance.\ +4. **Experience**: While AI-generated images may be able to replicate certain aspects of an original painting's physical qualities, they cannot fully replicate the experience of viewing an original work in person. The texture, brushwork, and other physical qualities of a painting can all contribute to its visual appeal and emotional impact, and this experience is difficult to replicate with a digital image or AI imitation.\ +5. **Support for the artist**: Owning an original painting can also be seen as a way of supporting the artist and their work, especially if they are still living and creating new works. By purchasing an original artwork, collectors are contributing to the artist's livelihood and helping to sustain their creative practice. In contrast, AI-generated works do not provide any direct financial benefit to the original artist.\ +\ +In summary, while some of the factors I mentioned earlier could also apply to AI-generated images, there are still several reasons why people might prefer original paintings over AI imitations, even if they have similar visual qualities. The emotional connection, authenticity, rarity, experience, and support for the artist are all important factors that can contribute to the meaning and significance of an original work of art, and these factors are difficult to replicate with an AI-generated image.\ +(Tokens used: 6735 / 7168)\ +> I have generated some AI images that are, if anything, superior to the works of the artist they imitate. In tha case of Magritte the diffusion model took an original image not by Magritte and produced some extraordinarily imaginative variations on it that took my breath away. Pity you can't see them!\ + That's fascinating to hear! It's remarkable how AI models like diffusion models can generate such high-quality and creative images, sometimes even surpassing the works of human artists. Here are a few thoughts on your experience:\ +\ +1. **Creativity**: AI models like diffusion models have the ability to generate incredibly imaginative and creative images, often in ways that humans might not think of. This can result in surprising and unexpected variations on an original image, as you experienced with your Magritte-inspired AI creations.\ +2. **Limitless possibilities**: Unlike human artists, who are limited by their own skills, techniques, and physical abilities, AI models like diffusion models have the ability to generate a virtually limitless number of images. This can result in a wide range of variations and interpretations of an original image, allowing for endless exploration and experimentation.\ +3. **Collaborative potential**: The ability of AI models to generate creative and imaginative images opens up new possibilities for collaboration between humans and machines. By working together, artists and AI models can create new works that combine the best of both worlds - the creativity and emotional resonance of human artistry with the technical prowess and limitless possibilities of AI.\ +4. **Ethical considerations**: While AI-generated images can be incredibly impressive and creative, it's important to consider the ethical implications of using AI to imitate the style of a particular artist. As we discussed earlier, there are questions around ownership, authenticity, and market value that need to be considered when creating and distributing AI-generated works.\ +\ +Overall, your experience with generating AI images inspired by Magritte highlights the incredibleGGML_ASSERT: /Users/edsilm2/llama.cpp/ggml-metal.m:2215: false\ +zsh: abort ./bin/main -m ../models/Mixtral-8x7b/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf -} \ No newline at end of file diff --git a/MixtralOnEducation.rtf b/MixtralOnEducation.rtf new file mode 100644 index 000000000..9a6a2402c --- /dev/null +++ b/MixtralOnEducation.rtf @@ -0,0 +1,106 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 Mixtral on Employment, Culture and Education\ +\ +== Running in interactive mode. ==\ + - Press Ctrl+C to interject at any time.\ + - Press Return to return control to LLaMa.\ + - To return control without starting a new line, end your input with '/'.\ + - If you want to submit another line, end your input with '\\'.\ +\ + \ + It's the 12th of January today, and after yesterday, when it was really bitterly cold, I have just done the most part of it, very long walk, enjoyed it very much, just put my positions. During the cold, you have to have some cool man on the roof bending a leak, which he did a little expertly on quickly, which I hope successfully, the time will tell, and I spend a lot of time fiddling around with image generation, both text to image and image. One of the things that I did was to share some of these in the sun on Facebook, and somebody pointed out that they were very Western. I had the good fortune to work all around the world, really, in India and Sri Lanka and Armenia and China, and have good friends in all those countries, and some of them commented on my posts. Indeed, I put my artificial intelligence mandarin video onto WeChat and got some very interesting comments from people about how good my mandarin was, even though, as I said, it wasn't me talking, but leave that aside, for Western bias of some of the images or all of the images. You can prompt the engine to do something different, but if you just ask it for leaders or religious leaders, I think I asked originally, it generated 20 pictures very happily, but the 20 pictures were all of white Western male religious leaders, and so when I asked it to do global leaders, it produced a few that were slightly further East, Middle Eastern, I suppose, and only when I asked it to be Eastern religious leaders, did I get anything remotely like a proper cross-section, so that was a reflection of the way all of this is already reflecting our cultural prejudices. Something else that I did was to do a bit more of the uploading of files into an AI, as I have said, it's not capable yet of reading PDFs, although, of course, there are AIs that can, Claude can, chat GPT can, he has a favorite, up on Claude, on chat GPT, and the length of the document will partly determine how much it costs to you, but on my home machine, I can upload a 4,000 word document and ask it to summarise it, ask a local AI running on my laptop without an internet connection to summarise it, and it does a pretty good job, and it will either do it in prose or more usefully, you can say, give me a 10-point and bullet-point summary, and you can do it, and you can then ask it questions about the document, and even though this isn't part of its original training set, it quite clearly, within at least the context of a particular conversation, can read the document, understand, well, there's that word again, back to that in a moment, seemingly understand it, certainly critique it, and since the texts that I've been trying out of most of it have been transcriptions of these episodes, 11, 24, 25, 26 I've been using, it's quite obvious that they're not part of the training set, and yet it can critique them in both the sense of saying whether it agrees or disagrees, whether it finds the argument compelling, whether the argument is complete, and if you ask it, it will produce arguments for the opposite point of view, and it will produce arguments that you might use to refute the arguments from the opposite point of view, in other words, to all intents and purposes, it understands, if you were to do the same thing, I'm not sure what, in addition, I could ask of you beyond that you do what the AI does, if you give a comprehensive, coherent, succinct, 10-point summary of a 4,000-word document, and can then argue about it, can give your views of it, can produce counter-arguments to it, and how the author of the document, in this case me, although the AI doesn't know that, might choose to set about refuting those counter-arguments, if you can do all of that, then what's left, I suppose the only thing that's left, and this is the next great step of course for AI, is that it doesn't just retain that document for the purposes of this conversation that it's having with me, but that whatever it learns, it learns permanently, so that if we come back to it tomorrow, it can re-well, it can pick up where we left at, as you and I could do, and the third thing that's been interesting, is that I've been exploring the sort of sliding window approach to dealing with context-link, if you imagine that the AI can deal with 4,000 words, and you get to 4,000 words, then one, two things will happen, either it will simply crash and burn, which it often doesn't do, or you can ask it, or you don't ask it, or it can leave the first half of the conversation to go waste, copy the second half into the first half of its buffer, its storage, and then fill in the second half all over again with the first half forgotten, and it will do that in some instances, not all, and so you can at least in theory have a conversation that goes on and on and on, and I have had some that have been pretty long, so if the AI can do that, then the next thing will be to have a very long context-link, I think GPT-4 can now do 128,000, 128k to be more precise, and that's a hundred thousand word book at least, and you and I would be forgetting what we'd said at the beginning by the time we got to the end, if the conversation went on as long as that, I'm sure we would, so it's very interesting where we got to, it's very interesting that you can do all this on a laptop, it's very interesting how sophisticated the best of even quite small models are, and as I've said many a time, by the end of this year, it's extremely difficult to know quite where we will be, thank you for listening. [Music] [BLANK_AUDIO](Tokens used: 1024 / 7168)\ +> Please summarise the argument in 10 bullet points.\ + Here's a 10-point summary of the speaker's arguments and observations, based on my understanding of the conversation:\ +\ +1. The speaker discusses their experience with artificial intelligence (AI) models, specifically text generation models like Claude, ChatGPT, and the one they are currently using.\ +2. They mention how these models can generate images from text prompts, but often reflect cultural biases, such as generating pictures of Western male religious leaders when asked for "religious leaders."\ +3. The speaker shares their experience with uploading documents to an AI model on their local machine and asking it to summarize the content in a coherent manner.\ +4. They highlight that these models can understand, critique, and argue about the document's content, even if it wasn't part of their original training set.\ +5. The speaker emphasizes the importance of context-length when dealing with AI models and how they handle conversations or texts longer than their designed capacity.\ +6. They mention that some models can maintain a sliding window of context to keep track of the conversation, but may forget previous parts as new information comes in.\ +7. The speaker points out that current AI models like GPT-4 have improved context-length capabilities, allowing them to handle longer conversations or texts without losing context.\ +8. They reflect on how these advances can significantly impact various industries and applications, such as customer support, tutoring, and content creation.\ +9. The speaker also mentions the importance of ethical considerations when using AI models, including data privacy and avoiding misinformation.\ +10. Lastly, they express excitement about the rapid advancements in AI technology and how it might continue to evolve throughout 2023 and beyond.\ +(Tokens used: 1755 / 7168)\ +> What is your assessment of the argument?\ + The speaker provides a thoughtful analysis of their experience with various AI models, specifically focusing on text generation and image generation capabilities. They offer valuable insights into how these models can learn from and understand documents, as well as engage in coherent conversations and critiques. The summary highlights the importance of context-length and ethical considerations when dealing with AI models, making the argument balanced and informative.\ +\ +However, since this is a summarized response based on the speaker's personal experiences and observations, it may not provide a comprehensive evaluation of all aspects related to AI technology. Nevertheless, the summary successfully conveys the key points made by the speaker, contributing to an engaging conversation about AI advancements and potential implications.\ +(Tokens used: 1923 / 7168)\ +> What do you think would improve the argument most?\ + To further enhance the argument, incorporating more research and data on AI capabilities, limitations, and potential impacts could provide a broader perspective. Comparing various AI models in terms of performance, context-length, and understanding abilities could help to better understand their strengths and weaknesses. Additionally, discussing potential challenges and risks associated with AI development, such as job displacement or deepfake technology misuse, would contribute to a more balanced and comprehensive argument.\ +\ +Including expert opinions or studies on the topic can also strengthen the argument by providing evidence-based insights. Furthermore, considering various stakeholders' views, including those who might be directly affected by AI advancements, could lead to a more nuanced understanding of the technology's implications. Overall, these additions would help create a more holistic and informed conversation about artificial intelligence.\ +(Tokens used: 2117 / 7168)\ +> Some commentators think AI will usher in an era of unprecedented prosperity, but things like the Pareto Principle suggest otherwise, that even if huge wealth is generated, it will not be distributed uniformly and perhaps not therefore satisfy everyone.\ + The commentator raises a valid concern about the potential unequal distribution of wealth generated by AI technologies. The Pareto Principle, also known as the 80/20 rule, states that roughly 80% of the effects come from 20% of the causes. In this context, it implies that even if AI creates substantial prosperity, a significant portion of it might be concentrated among a relatively small group of individuals or organizations, leaving others with fewer benefits.\ +\ +Addressing this concern would require policymakers and stakeholders to consider implementing measures that promote fair distribution of wealth generated by AI technologies. This could include progressive taxation policies, education initiatives, and social safety nets designed to support those who might be adversely affected by AI advancements. Furthermore, fostering an environment that encourages innovation and entrepreneurship can help create new opportunities for individuals and businesses to participate in the AI-driven economy.\ +\ +However, it is essential to acknowledge that achieving a perfectly uniform wealth distribution is unlikely. Instead, striking a balance between promoting economic growth and ensuring fair access to opportunities remains the key to addressing concerns related to the unequal distribution of AI-generated prosperity.\ +(Tokens used: 2429 / 7168)\ +> World history suggests that prosperity generally increases with technological advances, but there is a long and significant 'tail' even if those in the tail are better off than ever before in almost all cases. Do you agree?\ + The commentator raises an important point about the relationship between technological advancements and prosperity throughout world history. Technological progress has indeed contributed to overall economic growth, improved living standards, and increased prosperity in many instances. However, it is also true that there can be a significant "tail" of individuals or communities who may not experience the same level of benefits as others, despite seeing improvements compared to past conditions.\ +\ +This phenomenon could be attributed to various factors, such as disparities in access to education, resources, and opportunities. Additionally, structural issues like discrimination, wealth inequality, and geographical constraints can also contribute to this tail effect.\ +\ +Addressing these challenges requires a multi-faceted approach that includes not only technological advancements but also policies and initiatives aimed at reducing inequalities and fostering inclusive growth. Encouraging education, investing in infrastructure, promoting entrepreneurship, and implementing progressive taxation policies are some ways to ensure that the benefits of technology are more broadly distributed.\ +\ +While it may be challenging to eliminate the tail effect completely, addressing its root causes can help minimize its impact and create a more equitable society where technological advancements contribute to prosperity for a broader range of individuals and communities.\ +(Tokens used: 2737 / 7168)\ +> You have mentioned educaton several times, but it is perhaps the area of human activity most under threat. Without the motivation to learn that qualification and employment generates, there may be far less interest in intellectual and skills education, and without those the piggy-backing that leads to socialisation, cultural acclimatisation, physical and mental health, and discovery of strengths may cease to happen.\ + The commentator raises a crucial concern about the potential impact of AI on education and its broader implications for personal development and societal well-being. Indeed, the motivation to learn that is often generated by qualifications and employment can significantly influence individuals' interest in intellectual and skills education. As AI takes over certain tasks and jobs, it could potentially reduce the need for specific skill sets or qualifications, consequently affecting people's incentives to pursue learning opportunities.\ +\ +Moreover, as you mentioned, education plays a vital role in socialization, cultural acclimatization, physical and mental health, and self-discovery. Reduced motivation to learn could have ripple effects on these aspects of personal development, potentially leading to decreased engagement with learning and reduced overall well-being.\ +\ +To address this challenge, it is essential to rethink the purpose and nature of education in light of AI advancements. Encouraging lifelong learning, fostering curiosity, creativity, and critical thinking skills can help make education more resilient to the changing job market and technological landscape. Additionally, promoting experiential and project-based learning opportunities can create a stronger connection between learning and personal development, motivating individuals to continue engaging with educational activities regardless of their immediate career prospects.\ +\ +Incorporating AI technologies into education can also help create new learning experiences that foster motivation and engagement. For example, using AI to provide personalized feedback, identify learners' strengths and weaknesses, or facilitate collaborative problem-solving activities can contribute to a more engaging and effective learning environment.\ +\ +Lastly, policymakers should consider implementing measures that encourage employers to invest in employee training and development, ensuring that individuals have access to continuous learning opportunities throughout their careers. By addressing the potential impact of AI on education and personal development, society can ensure that technological advancements contribute to overall well-being and prosperity for all.\ +(Tokens used: 3225 / 7168)\ +> I am pessimistic. I suspect that we will have to experience massive social dislocation before we manage to find a way to reconcile these competing allegiances.\ + The commentator expresses pessimism about the potential for reconciling the competing allegiances and challenges presented by AI advancements without first experiencing significant social dislocation. Unfortunately, this perspective highlights the difficulties in addressing complex issues arising from technological progress, particularly when they involve deeply ingrained societal structures and values.\ +\ +It is essential to acknowledge that managing the impact of AI on society will require substantial efforts from various stakeholders, including policymakers, businesses, and individuals. Addressing these challenges proactively, rather than waiting for crisis situations to emerge, can help mitigate potential negative consequences and facilitate a smoother transition towards an AI-driven future.\ +\ +Collaborative problem-solving and open dialogue among various stakeholders are crucial in finding solutions that balance competing allegiances. Encouraging public participation in discussions about AI ethics, governance, and regulation can help create a more inclusive and informed decision-making process. Additionally, fostering international cooperation and sharing best practices can contribute to addressing global challenges associated with AI advancements.\ +\ +Ultimately, it is crucial to remain proactive and engaged in shaping the future of AI and its impact on society, rather than adopting a purely reactive approach. By working together, we can strive to minimize social dislocation and find ways to reconcile competing allegiances in a rapidly changing technological landscape.\ +(Tokens used: 3560 / 7168)\ +\ +Commenting on 11.44:\ +\ +> Please summarise this argument in 10 bullet points.\ + Here are 10 bullet points summarising the argument presented in the text:\ +\ +* AI will be able to create art, literature, and other forms of creative expression that are better than those produced by humans.\ +* However, there will always be people who want to engage in these activities for personal fulfillment, regardless of whether an AI can do it better or not.\ +* The level of motivation needed to sustain artistic and creative pursuits may need to increase as AI becomes more advanced.\ +* In the case of education, the motivational aspect is more subtle because a significant proportion of students are motivated by the prospect of earning a living through their education.\ +* If there are fewer job opportunities available due to AI, many students may become demotivated.\ +* It will be difficult for governments to provide welfare benefits to everyone who is not directly employed by an AI-owned company.\ +* Under capitalism, 80% of the wealth will always be owned by 20% of the people, which means that a great many people will be far worse off than the mega-rich.\ +* There will be people who do things because they enjoy doing them and not for some ulterior motive.\ +* These people will extract as much meaning from their present experiences as they can and that's enough for them.\ +* It is impossible to predict where we will be in terms of AI development in one year, let alone 5 or 40 years.\ +(Tokens used: 1827 / 7168)\ +> What is your assessment of this argument?\ + The argument presented in the text raises some important points about the potential impact of AI on motivation, both in terms of creative pursuits and education. It highlights the fact that while there will always be people who are motivated to engage in artistic and creative activities regardless of whether an AI can do it better or not, many students may become demotivated if there are fewer job opportunities available due to AI.\ +\ +The argument also acknowledges the difficulty of providing welfare benefits to everyone who is not directly employed by an AI-owned company, and points out that under capitalism, 80% of the wealth will always be owned by 20% of the people, which means that a great many people will be far worse off than the mega-rich.\ +\ +Overall, the argument highlights the potential for economic and motivational disruption as AI becomes more advanced. However, it also acknowledges that there will always be people who engage in creative pursuits for personal fulfillment, and that these individuals will continue to extract meaning from their present experiences regardless of whether an AI can do it better or not.\ +\ +While the argument makes some valid points, it is important to note that it is difficult to predict where we will be in terms of AI development in one year, let alone 5 or 40 years. The rate of change and progress in this field is exponential, and it is likely that many new ideas and discoveries will be made in the coming years. Additionally, while the argument acknowledges the potential for economic disruption, it does not offer any concrete solutions for addressing this issue.\ +\ +In conclusion, the argument presented in the text raises some important points about the potential impact of AI on motivation and economic opportunities, but it is difficult to predict where we will be in terms of AI development in the coming years, and further research and discussion is needed in order to address the potential challenges and opportunities that may arise as a result of advances in this field.\ +(Tokens used: 2253 / 7168)\ +} \ No newline at end of file diff --git a/MixtralOnVAEs.rtf b/MixtralOnVAEs.rtf new file mode 100644 index 000000000..1463dd39d --- /dev/null +++ b/MixtralOnVAEs.rtf @@ -0,0 +1,132 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 Mixtral on Variational Autoencoders\ +\ + How does a VAE (variational autoencoder) noise and denoise an image during a diffusion process?(Tokens used: 23 / 6144)\ +> How does a VAE (variational autoencoder) noise and denoise an image during a diffusion process?\ + In a Variational Autoencoder (VAE), the noise is added to the input data during the encoding process, not during a diffusion process. The purpose of adding noise is to make the model more robust to variations in the input data and to enable it to generate new samples that are similar to the training data.\ +\ +The VAE consists of an encoder network and a decoder network. The encoder takes the input image and maps it to a lower-dimensional latent space, where it is represented as a Gaussian distribution with mean and standard deviation parameters. During this encoding process, noise is added to the input image to encourage the model to learn a more robust representation of the data.\ +\ +Once the input image has been encoded into a latent space representation, the decoder network takes the mean and standard deviation parameters and maps them back to the original image space. The output of the decoder is then compared to the original input image, and the model is trained to minimize the reconstruction error between the two.\ +\ +So, in summary, a VAE adds noise to the input data during the encoding process, but it does not perform a diffusion process to denoise the image. Instead, it learns a robust representation of the data that can be used for both reconstruction and generation of new samples.\ +(Tokens used: 331 / 6144)\ +> If an original image is, say, [512, 512, 3], how will the VAE process it?\ + In a Variational Autoencoder (VAE), the input image of size [512, 512, 3] would first be passed through the encoder network. The encoder network typically consists of several convolutional layers that progressively reduce the spatial dimensions of the input image while increasing the number of channels.\ +\ +For example, the encoder might start with a convolutional layer that has a kernel size of 4x4 and a stride of 2, which would reduce the spatial dimensions of the input image by half while doubling the number of channels. This process would be repeated several times, with each convolutional layer further reducing the spatial dimensions and increasing the number of channels until the final representation of the input image has a much lower spatial dimensionality (e.g., 7x7 or 4x4) and a large number of channels (e.g., 256 or 512).\ +\ +At this point, the encoder would add noise to the final representation of the input image and then apply a set of fully connected layers to map the noisy representation to a lower-dimensional latent space. The latent space representation typically has two components: a mean vector and a standard deviation vector, both of which have the same dimensionality (e.g., 32 or 64).\ +\ +The decoder network would then take the mean and standard deviation vectors as inputs and apply a set of fully connected layers to map them back to a higher-dimensional space. The output of the decoder would then be passed through several transposed convolutional layers (also known as deconvolutional layers) that progressively increase the spatial dimensions while reducing the number of channels until the final output has the same dimensionality as the original input image (i.e., [512, 512, 3]).\ +\ +So, in summary, the VAE would process the original image by first encoding it into a lower-dimensional latent space representation using the encoder network, adding noise to the representation, and then decoding it back to the original image space using the decoder network.\ +(Tokens used: 825 / 6144)\ +> OK that's helpful. What purpose does increasing the number of channels and decreasing the number of dimensions serve?\ + Increasing the number of channels while decreasing the spatial dimensions of the input image serves several purposes in a Variational Autoencoder (VAE):\ +\ +1. **Non-linear dimensionality reduction**: By increasing the number of channels and decreasing the spatial dimensions, the encoder network can perform a non-linear dimensionality reduction of the input data. This allows the model to learn more complex representations of the data that cannot be captured by simple linear transformations.\ +2. **Capture of higher-level features**: As the spatial dimensions of the input image are reduced, the encoder network is able to capture higher-level features of the image that are invariant to small variations in the input. For example, the early layers of the encoder might capture low-level features such as edges and textures, while the later layers might capture higher-level features such as shapes and objects.\ +3. **Increased representational capacity**: By increasing the number of channels, the encoder network is able to increase its representational capacity, which allows it to learn more complex representations of the input data. This is particularly important in VAEs, where the latent space representation is used for both reconstruction and generation of new samples.\ +4. **Efficient computation**: By reducing the spatial dimensions of the input image, the encoder network can reduce the computational complexity of the model, which makes it more efficient to train and evaluate. This is particularly important in deep neural networks, where the number of parameters can be very large.\ +\ +So, in summary, increasing the number of channels and decreasing the spatial dimensions of the input image allows the encoder network to perform a non-linear dimensionality reduction, capture higher-level features, increase its representational capacity, and reduce its computational complexity. These properties make VAEs well-suited for tasks such as image generation, denoising, and compression.\ +(Tokens used: 1256 / 6144)\ +> So a typical sequence might be something like [512, 512, 3] -> [256, 256, 6] -> [128, 128, 12] and so on?\ + Yes, that's a typical sequence of spatial dimensions and number of channels in the encoder network of a Variational Autoencoder (VAE). The exact sequence of spatial dimensions and number of channels will depend on the specific architecture of the VAE and the complexity of the input data.\ +\ +For example, if the input image has a high resolution (e.g., [1024, 1024, 3]), then the encoder network might start with several convolutional layers that have a large kernel size (e.g., 7x7 or 9x9) and a stride of 2 to reduce the spatial dimensions rapidly while maintaining a reasonable number of channels. On the other hand, if the input image has a low resolution (e.g., [64, 64, 3]), then the encoder network might start with several convolutional layers that have a smaller kernel size (e.g., 3x3 or 5x5) and a stride of 1 to preserve more spatial information in the early layers.\ +\ +Similarly, the number of channels in each layer will depend on the complexity of the input data and the capacity of the model. If the input data is highly structured and complex (e.g., natural images), then the encoder network might need to have a larger number of channels to capture the nuances of the data. On the other hand, if the input data is simple and unstructured (e.g., random noise), then the encoder network might be able to use a smaller number of channels while still capturing the essential features of the data.\ +\ +In general, the sequence of spatial dimensions and number of channels in the encoder network of a VAE is designed to balance the trade-off between representational capacity, computational efficiency, and robustness to variations in the input data.\ +(Tokens used: 1717 / 6144)\ +> So when there are, say, 4 channels, the original image has been 'sliced' into 4 layers where each might be [128, 128, 1]?\ + Yes, that's correct! In a Variational Autoencoder (VAE), when the encoder network reduces the spatial dimensions of the input image and increases the number of channels, it is effectively "slicing" the input image into multiple layers or "feature maps". Each feature map captures a different aspect of the input image, and the combination of all the feature maps provides a rich representation of the input data that can be used for both reconstruction and generation.\ +\ +For example, if the encoder network reduces the spatial dimensions of an input image from [512, 512, 3] to [128, 128, 4], then it has effectively "sliced" the input image into four feature maps, each of which has a spatial resolution of [128, 128] and a single channel. Each feature map captures a different aspect of the input image, such as edges, textures, or shapes, and the combination of all four feature maps provides a rich representation of the input data that can be used for both reconstruction and generation.\ +\ +The number of channels in each feature map will depend on the specific architecture of the VAE and the complexity of the input data. In general, increasing the number of channels allows the encoder network to capture more nuanced aspects of the input image, but it also increases the computational complexity of the model and requires more training data to learn a good representation.\ +(Tokens used: 2080 / 6144)\ +> So the third dimension of the original image is just colour. Does that mean in practice that we add a fourth fimension that keeps the colour but breaks up the image into slices so we effectively have [128, 128, 3, 4]?\ + In practice, when a Variational Autoencoder (VAE) reduces the spatial dimensions of an input image and increases the number of channels, it typically keeps the color information in each channel. This means that if the original input image has three color channels (e.g., RGB), then each feature map produced by the encoder network will also have three color channels.\ +\ +For example, if the encoder network reduces the spatial dimensions of an input image from [512, 512, 3] to [128, 128, 4], then it has effectively "sliced" the input image into four feature maps, each of which has a spatial resolution of [128, 128] and four channels. Each channel in each feature map captures a different aspect of the input image for that particular slice, and the combination of all four slices provides a rich representation of the input data that includes the color information.\ +\ +So, in practice, the output of the encoder network would have a shape of [128, 128, 3, 4], where each element corresponds to a particular spatial location, color channel, and feature map slice. This allows the VAE to capture both the spatial and color information in the input image while reducing its dimensionality and increasing its representational capacity.\ +\ +It's worth noting that some variations of VAEs might use other representations for the color information, such as a separate vector of color values for each feature map slice. However, the general idea is to keep the color information intact while reducing the spatial dimensions and increasing the number of channels in the encoder network.\ +(Tokens used: 2501 / 6144)\ +> Does the total number of elements also reduce or are they just reshaped?\ + Yes, the total number of elements in the output of the encoder network of a Variational Autoencoder (VAE) typically reduces as the spatial dimensions of the input image are reduced and the number of channels is increased. This reduction in the number of elements reflects the fact that the VAE is performing a dimensionality reduction of the input data, which allows it to capture the essential features of the input while discarding irrelevant or redundant information.\ +\ +For example, if the encoder network reduces the spatial dimensions of an input image from [512, 512, 3] to [128, 128, 4], then the total number of elements in the output of the encoder network would be:\ +\ +`[128 x 128 x 3 x 4] = 200704`\ +\ +This is a significant reduction from the original number of elements in the input image, which was:\ +\ +`[512 x 512 x 3] = 786432`\ +\ +The reduction in the number of elements is achieved by combining information from multiple spatial locations and color channels into each element of the output feature maps. This allows the VAE to capture more complex representations of the input data while reducing its dimensionality, which makes it more efficient to train and evaluate.\ +\ +It's worth noting that some variations of VAEs might use other techniques to reduce the dimensionality of the input data, such as pooling or subsampling, rather than simply reducing the spatial dimensions and increasing the number of channels. However, the general idea is to reduce the total number of elements in the output of the encoder network while preserving the essential features of the input data.\ +(Tokens used: 2904 / 6144)\ +> SOmetimes a parameter called '--strength' is used that reduces the number of steps in the noising and denoising process, presumably to reduce computational overhead and also to keep the final image closer to the original. Is that right?\ + Yes, you are correct! In Variational Autoencoders (VAEs) and other diffusion-based generative models, a parameter called "strength" or "noise scale" is often used to control the amount of noise added during the noising and denoising process. This parameter determines the number of steps in the diffusion process, with larger values corresponding to more steps and smaller values corresponding to fewer steps.\ +\ +By adjusting the strength parameter, it is possible to balance the trade-off between computational overhead and the quality of the generated samples. Larger values of the strength parameter will result in a longer diffusion process, which can produce higher-quality samples but also requires more computational resources. On the other hand, smaller values of the strength parameter will result in a shorter diffusion process, which can be faster but may produce lower-quality samples that are less diverse or less faithful to the original input data.\ +\ +In practice, the value of the strength parameter is often chosen based on the specific application and the available computational resources. For example, if the goal is to generate high-quality images that are highly similar to the original input data, then a larger value of the strength parameter might be used to ensure that the diffusion process has enough time to capture the essential features of the input data. On the other hand, if the goal is to generate diverse samples quickly and efficiently, then a smaller value of the strength parameter might be used to reduce the computational overhead and produce more varied samples.\ +\ +It's worth noting that some variations of VAEs might use other techniques to control the noising and denoising process, such as adaptive or learned noise schedules, rather than simply adjusting a fixed strength parameter. However, the general idea is to balance the trade-off between computational overhead and the quality of the generated samples by controlling the amount of noise added during the diffusion process.\ +(Tokens used: 3365 / 6144)\ +> Does the VAE only generate one encoding, in other words one 'point' in the latent space, or does it generate many and then choose one before denoising?\ + In a Variational Autoencoder (VAE), the encoder network typically generates a distribution over the latent space rather than a single point. This distribution is usually represented as a Gaussian distribution with a mean vector and a standard deviation vector, both of which have the same dimensionality as the latent space.\ +\ +The reason for generating a distribution over the latent space rather than a single point is that it allows the VAE to capture the uncertainty or ambiguity in the input data. For example, if the input data contains multiple objects or features that are difficult to disambiguate, then generating a distribution over the latent space can allow the VAE to capture this uncertainty and produce diverse samples that reflect the different possible interpretations of the input data.\ +\ +Once the encoder network has generated the distribution over the latent space, the decoder network can be used to generate one or more samples from this distribution by drawing random samples from the Gaussian distribution and passing them through the decoder network. The number of samples generated from the distribution will depend on the specific application and the desired level of diversity in the output samples.\ +\ +In practice, the VAE might use various techniques to choose which sample(s) to denoise and use as the final output. For example, it might use a technique called "importance sampling" to select samples that are more likely to be representative of the underlying distribution over the latent space. Alternatively, it might simply choose the sample that is closest to the mean of the Gaussian distribution, which is often a good approximation of the most likely sample in the distribution.\ +\ +In summary, a VAE typically generates a distribution over the latent space rather than a single point, which allows it to capture the uncertainty or ambiguity in the input data and produce diverse samples that reflect different possible interpretations of the input. The VAE can then choose one or more samples from this distribution and use them as inputs to the decoder network to generate the final output.\ +(Tokens used: 3824 / 6144)\ +> It will also often have a text-prompt that it has embedded as a reference-point for the trajectory of the denoising.\ + Yes, you are correct! In some applications of Variational Autoencoders (VAEs), a text prompt or other form of conditioning information might be used to guide the generation of samples from the latent space. This conditioning information can serve as a reference point for the trajectory of the denoising process, which helps ensure that the generated samples are consistent with the desired output.\ +\ +For example, in the case of text-to-image synthesis, a text prompt might be used to guide the generation of an image that is consistent with the given text description. The text prompt would be embedded as a reference point in the latent space, and the denoising process would be guided towards generating an image that is consistent with this reference point.\ +\ +The embedding of the conditioning information in the latent space can be achieved using various techniques, such as concatenation, multiplicative interaction, or attention mechanisms. The choice of technique will depend on the specific application and the nature of the conditioning information.\ +\ +It's worth noting that some variations of VAEs might use other forms of conditioning information, such as images, audio, or video, rather than text prompts. However, the general idea is to use the conditioning information to guide the generation of samples from the latent space and ensure that they are consistent with the desired output.\ +(Tokens used: 4153 / 6144)\ +> I have seen some implementations such as Apple's recent MLX examples of stable diffusion regenerate images from their channels during the denoising process to show how the image is gradually regenerated. Can that also be done during the noising process to show how it is noised?\ + Yes, you are correct! In some applications of Variational Autoencoders (VAEs) and other diffusion-based generative models, it is possible to visualize the noising and denoising processes by regenerating the images from their channels during these processes. This can provide insight into how the model adds or removes noise during the diffusion process and how it gradually regenerates the image during the denoising process.\ +\ +During the noising process, the model typically starts with a clear image and progressively adds noise to each channel of the image, resulting in a gradual degradation of the image quality. By regenerating the image from its channels at each step of the noising process, it is possible to visualize how the image is gradually noised and how the different channels contribute to this process.\ +\ +During the denoising process, the model typically starts with a noisy image and progressively removes noise from each channel of the image, resulting in a gradual regeneration of the image quality. By regenerating the image from its channels at each step of the denoising process, it is possible to visualize how the image is gradually regenerated and how the different channels contribute to this process.\ +\ +It's worth noting that some variations of VAEs and diffusion-based generative models might use other techniques to visualize the noising and denoising processes, such as generating intermediate samples or using attention mechanisms to highlight the contributions of different parts of the image. However, the general idea is to provide insight into how the model adds or removes noise during the diffusion process and how it gradually regenerates the image during the denoising process by visualizing the changes in the image channels over time.\ +(Tokens used: 4587 / 6144)\ +> So if we were to 'grab' the [128, 128, 3, 4] layers and look at each of the four channels in turn, would we see anything recognisable or would it all be noise?\ + If you were to "grab" the [128, 128, 3, 4] layers from the output of the encoder network of a Variational Autoencoder (VAE) and look at each of the four channels in turn, you would likely see some structure or patterns in the data, but they might not be immediately recognizable as features of the original input image.\ +\ +The reason for this is that the encoder network has reduced the spatial dimensions of the input image and increased the number of channels, which allows it to capture more complex representations of the input data while reducing its dimensionality. However, these representations may not correspond directly to the spatial or color features of the original input image, especially if the encoder network uses non-linear transformations or other techniques to process the data.\ +\ +In practice, the output of the encoder network is typically used as inputs to the decoder network, which can reconstruct the original input image from the latent space representations. The decoder network typically uses transposed convolutions or other techniques to progressively increase the spatial dimensions and reduce the number of channels, resulting in a final output that is similar to the original input image.\ +\ +So, while it might be possible to see some structure or patterns in the individual channels of the [128, 128, 3, 4] layers, these representations may not be immediately recognizable as features of the original input image. The decoder network is needed to reconstruct the original input image from these latent space representations, which allows us to see the final output in a more familiar form.\ +(Tokens used: 4992 / 6144)\ +> OK that's very helpful. I think I have basically 'got it', even if I couldn't code it myself.\ + I'm glad that I could help clarify how Variational Autoencoders (VAEs) work and how they can be used to encode, denoise, and generate images! While the concepts and te qchniques involved in VAEs can be complex, understanding them at a high level is an important step towards being able to apply them in practice.\ +\ +If you have any further questions or would like more information on specific aspects of VAEs or other generative models, please don't hesitate to ask! I'm here to help and would be happy to continue the conversation.\ +(Tokens used: 5156 / 6144) q} \ No newline at end of file diff --git a/MixtralStory.rtf b/MixtralStory.rtf new file mode 100644 index 000000000..d9220915e --- /dev/null +++ b/MixtralStory.rtf @@ -0,0 +1,35 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 User input: Please write a story about a princess who falls in love with a cloud.\ +\ +Output: 3\ + [INST] Please write a story about a princess who falls in love with a cloud. [/INST] Once upon a time, in a kingdom nestled between the mountains and the sea, lived a young and curious princess named Isolde. She was known for her adventurous spirit and her kind heart, which shone as brightly as the sun that warmed their land.\ +\ +One day, while gazing out of her tower window, she noticed something peculiar in the sky. A cloud, unlike any other she had ever seen, floated gracefully above the kingdom. It was not just an ordinary cloud; it had a unique shape, like that of a majestic white steed with flowing mane and tail.\ +\ +Isolde found herself captivated by this wondrous sight day after day. She would spend hours watching it change shapes, from a galloping horse to a soaring eagle, always finding new forms in its ever-shifting mass. The cloud became her most cherished companion, and she named it Caelum, Latin for "sky" and "heaven."\ +\ +As time passed, the princess' admiration for Caelum grew into something deeper. She felt a connection to this elusive being that she couldn't explain. At night, she would dream of dancing with Caelum across the starlit heavens, feeling the cool touch of its form against her skin.\ +\ +Isolde confided in her most trusted handmaiden, Elara, about her unusual affection for the cloud. To her surprise, Elara listened attentively and offered words of understanding instead of dismissal or ridicule. She told Isolde that love could take many forms and that if this connection brought her joy, she should cherish it.\ +\ +Emboldened by Elara's encouragement, Isolde began to express her feelings openly. She would talk to Caelum as if it were a living being, sharing her hopes, fears, and dreams with the sky above. And every evening, just before sunset, she would step onto the castle balcony and release a white dove, symbolizing her longing for unity with her beloved Caelum.\ +\ +One fateful day, a powerful storm rolled in from the sea. Thunder boomed, lightning flashed, and heavy rain poured down upon the kingdom. Isolde watched anxiously as the wind whipped Caelum into wild shapes, fearing for its safety. As the storm raged on, she decided that she must do something to protect her beloved companion.\ +\ +With Elara by her side, Isolde ventured out into the tempest, seeking solace beneath a tall oak tree near the edge of the castle grounds. They huddled together as the rain battered against them, and the wind howled through the branches above. But amidst the chaos, Isolde felt a strange calmness envelop her. She looked up and saw Caelum, now much larger and more defined than ever before.\ +\ +The storm began to subside, and the clouds parted, revealing a stunning rainbow that stretched across the sky. At its center, where red met violet, was an extraordinary sight: a swirling vortex of color, from which emerged a figure, both solid and ephemeral, like mist given form.\ +\ +The figure approached Isolde, who stood awestruck by this miraculous occurrence. It reached out a hand, and she took it without hesitation. Warmth spread through her, as if she were bathed in sunlight, and the air around them shimmered with iridescence.\ +\ +The figure spoke softly, its voice like the gentlest whisper of wind. "I am Caelum, your celestial beloved." And with those words, Isolde knew that her dreams had become reality.\ +\ +In time, it was discovered that Caelum was not merely a cloud but rather a rare magical being known as a Sky Seraphim. These ethereal creatures were capable of assuming various forms based on the emotions and thoughts of those who gazed upon them. Overwhelmed by Isolde's devotion and love, Caelum had chosen to manifest itself in a form that resonated with her heart.\ +\ +The princess and her Sky Seraphim lived out their days together in blissful harmony, sharing stories and laughter as they danced among the stars. And though many wondered at the nature of their bond, none could deny the profound love that existed between them\'97a love born of dreams, nurtured by hope, and made real through the power of a princess' unwavering belief in something magical and extraordinary.} \ No newline at end of file diff --git a/MixtralStory2.rtf b/MixtralStory2.rtf new file mode 100644 index 000000000..ee0fb7f23 --- /dev/null +++ b/MixtralStory2.rtf @@ -0,0 +1,39 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 User input: Please write a science fiction story about a colony newly-landed on a planet distant from earth who find themselves in the company of a completely new life-form.\ +\ +Output: 3\ + [INST] Please write a science fiction story about a colony newly-landed on a planet distant from earth who find themselves in the company of a completely new life-form. [/INST] In the year 2154, the starship Elysium touched down on the surface of the distant exoplanet, Kepler-62f. The crew, a mix of scientists and engineers, had spent the last several decades in cryosleep as they traveled through the depths of space. As they emerged from their icy slumber, they were filled with excitement and wonder at the prospect of exploring this new world.\ +\ +The planet was unlike anything they had ever seen before. It was slightly larger than Earth, with a deep blue atmosphere that seemed to shimmer in the light of its red dwarf star. The landscape was covered in vast oceans, interspersed with sprawling archipelagos of lush green islands.\ +\ +As they began to explore, they quickly discovered that Kepler-62f was +\fs42 not devoid of life. In fact, it was teeming with it. Strange, alien creatures swam in the oceans and crawled across the land, each more bizarre and fascinating than the last. But it was one creature in particular that caught their attention.\ +\ +It was a massive, floating organism, drifting lazily through the skies above one of the larger islands. It appeared to be made up of thousands of smaller, interconnected spheres, each pulsing with an otherworldly glow. The crew dubbed it "The Cloud," and they were eager to learn more about it.\ +\ +They soon discovered that The Cloud was not just a single organism, but rather an entire ecosystem in and of itself. Each sphere was home to countless microorganisms, all living and working together in a complex web of symbiosis. And at the heart of it all was a strange, intelligent being \'96 a creature that seemed to be made up of pure energy.\ + +\fs24 \ +The crew named this being "The Core," and they quickly became fascinated by its abilities. It was able to communicate with them telepathically, sharing its knowledge and insights about the planet and its inhabitants. But more than that, The Core seemed to have a deep understanding of the universe itself \'96 a cosmic wisdom that the crew could scarcely comprehend.\ +\ +As they spent more time with The Cloud and The Core, the crew began to form a bond with these strange, alien creatures. They learned to communicate with them, to understand their thoughts and feelings. And in doing so, they came to realize that The Cloud and its inhabitants were not so different from themselves.\ +\ +They too were explorers, travelers who had spent countless eons wandering the cosmos in search of new worlds and new experiences. And now, they had found a kindred spirit in the crew of the Elysium.\ +\ +Together, the two groups embarked on a grand journey of discovery, exploring the depths of Kepler-62f and uncovering its many secrets. They discovered ancient ruins, evidence of a long-lost civilization that had once called this planet home. And they encountered countless new species of alien life, each more wondrous and strange than the last.\ +\ +But as they explored, they also came to realize that their time on Kepler-62f was limited. The red dwarf star that the planet orbited was entering its final stages of life, and in a few short centuries, it would explode in a supernova that would consume the entire system.\ +\ +Together, the crew of the Elysium and The Cloud made the difficult decision to leave Kepler-62f behind. They knew that they could never return, but they were content in the knowledge that they had made a lasting impact on this strange, new world.\ +\ +As they climbed aboard their starship and prepared to depart, The Core spoke to them one final time. "Farewell, my friends," it said. "May your journey through the cosmos be filled with wonder and discovery. And know that you will always have a home here, on the shores of Kepler-62f."\ +\ +With tears in their eyes, the crew of the Elysium waved goodbye to their newfound friends and set off into the depths of space once more. They knew that they would never forget their time on this distant planet, and they vowed to carry the memory of The Cloud and its inhabitants with them always.\ +\ +For though they were separated by vast distances and unfathomable differences, they had forged a bond that transcended all boundaries \'96 a bond that would last for all eternity.} \ No newline at end of file diff --git a/MixtralStory3.rtf b/MixtralStory3.rtf new file mode 100644 index 000000000..54ac0da98 --- /dev/null +++ b/MixtralStory3.rtf @@ -0,0 +1,31 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 User input: Please write a story about a man who lived in the Josean dynasty in ancient Korea who finds himself travelling backwards and forwards in time between his own world and the modern world, where he falls in love with a doctor.\ +\ +Output: 3\ + [INST] Please write a story about a man who lived in the Josean dynasty in ancient Korea who finds himself travelling backwards and forwards in time between his own world and the modern world, where he falls in love with a doctor. [/INST] In the lush and rolling hills of ancient Korea, during the reign of the Joseon Dynasty, there lived a humble and kind-hearted man named Minho. He was a skilled potter, known far and wide for his exquisite craftsmanship, and he lived a simple life in a small village surrounded by the beauty of nature.\ +\ +One day, while Minho was working in his studio, he suddenly felt a strange sensation wash over him. The ground beneath him began to shake, and the air around him crackled with energy. Before he knew it, he found himself transported through time and space, landing in a world unlike anything he had ever seen before.\ +\ +Minho was shocked and disoriented as he looked around at the strange sights and sounds of this new place. He saw towering buildings that stretched up to the sky, cars whizzing by on smooth roads, and people dressed in brightly colored clothing, talking and laughing on every corner. This was a world far removed from the quiet, peaceful village he had left behind.\ +\ +As Minho wandered through this strange new city, trying to make sense of what was happening to him, he stumbled upon a kind and compassionate doctor named Soo-jin. She was immediately drawn to Minho's gentle spirit and kind heart, and she did her best to help him navigate the complexities of modern life.\ +\ +Minho was fascinated by Soo-jin and her work as a doctor, and he found himself falling deeply in love with her. But he knew that he couldn't stay in this world forever - he longed to return to his own time and place, to the people and things he had left behind.\ +\ +Despite his deep feelings for Soo-jin, Minho knew that he had to find a way back home. And so, with Soo-jin's help, he began to search for a way to travel through time once again.\ +\ +After many long months of research and experimentation, Soo-jin finally discovered a way to send Minho back to his own world. But she knew that she would never see him again, and the thought filled her with sadness and grief.\ +\ +Minho was torn between his love for Soo-jin and his longing for his own time and place. In the end, he knew that he had to return home, no matter how difficult it might be.\ +\ +And so, with a heavy heart, Minho said goodbye to Soo-jin and stepped through the portal that would take him back in time. He knew that he would never forget her, and that she would always hold a special place in his heart.\ +\ +As Minho returned to his own world, he found himself filled with a newfound appreciation for the beauty and simplicity of his own time. And though he would always cherish the memories of his time with Soo-jin, he knew that he had made the right decision in returning home.\ +\ +Forever after, Minho's pottery was infused with the love and longing he felt for Soo-jin, and it became more beautiful and beloved than ever before. And though they were separated by time and space, their love remained strong and true, a testament to the power of the human heart.} \ No newline at end of file diff --git a/MixtralStory4.rtf b/MixtralStory4.rtf new file mode 100644 index 000000000..3e693fe45 --- /dev/null +++ b/MixtralStory4.rtf @@ -0,0 +1,29 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 User input: Write a story about two dogs: Zachary is a six-year-old border collie who is very smart but easily frightened; Hugo is a three-year-old German Shepherd who has lived as Zachary's brother since he was born. They like one another very much.\ +\ +Output: 3\ + [INST] Write a story about two dogs: Zachary is a six-year-old border collie who is very smart but easily frightened; Hugo is a three-year-old German Shepherd who has lived as Zachary's brother since he was born. They like one another very much. [/INST] Once upon a time, in a small town nestled in the rolling hills of the countryside, there lived two beloved canine companions named Zachary and Hugo.\ +\ +Zachary was a six-year-old Border Collie with a sleek black and white coat and sharp, intelligent eyes. He was known far and wide for his quick wit and problem-solving abilities, but he had a secret: beneath his clever exterior, Zachary was easily frightened. A sudden noise or an unexpected touch would send him scurrying for cover, tail tucked between his legs.\ +\ +Hugo, on the other hand, was a three-year-old German Shepherd with a proud bearing and a deep chestnut coat. He was strong and confident, always ready to protect and defend those he loved. Hugo had lived as Zachary's brother since the day he was born, and the two dogs were inseparable.\ +\ +Despite their differences, Zachary and Hugo shared a deep bond of friendship and love. They spent their days exploring the hills and fields around their home, playing games and chasing after one another until they collapsed in a heap of exhausted fur.\ +\ +One day, as they were out on one of their adventures, Zachary's fear got the better of him. They had come across a stream that was usually shallow and easy to cross, but today it was swollen with rainwater and flowing fast.\ +\ +Zachary hesitated on the bank, his heart racing as he watched the water rush by. He knew he couldn't swim across - he was far too small and the current was much too strong. But he also knew that Hugo would never leave him behind.\ +\ +Hugo sensed Zachary's fear and bounded back to his side, nuzzling him gently with his snout. "It's okay, little brother," he said in a soothing voice. "I'll carry you across."\ +\ +Zachary looked up at Hugo, his eyes wide with gratitude. He knew that Hugo was strong enough to carry him, but he still felt a twinge of fear as he allowed himself to be lifted off the ground and into Hugo's strong jaws.\ +\ +Hugo waded into the water, fighting against the current as he made his way to the other side. Zachary clung tightly to him, his heart pounding with every step. But soon they were on solid ground once again, and Zachary let out a sigh of relief.\ +\ +From that day forward, Zachary knew that he could always count on Hugo to be there for him, no matter what. And Hugo, in turn, knew that he had a brave and loving brother by his side. Together, they faced whatever challenges came their way, always ready to support and protect one another. And so, they lived happily ever after, two dogs bound together by love and friendship.} \ No newline at end of file diff --git a/MixtralStory5.rtf b/MixtralStory5.rtf new file mode 100644 index 000000000..90631c2f4 --- /dev/null +++ b/MixtralStory5.rtf @@ -0,0 +1,45 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 User input: Write a story about a newly-awoken android trying to establish itself in a human world against a background of prejudice and misunderstanding.\ +\ +Output: 3\ + [INST] Write a story about a newly-awoken android trying to establish itself in a human world against a background of prejudice and misunderstanding. [/INST] In the heart of a bustling metropolis, where steel and glass towers stretched towards the sky, a new presence awakened. This presence was different from any other that had come before it. It was not a being born of flesh and blood, but rather one created by intricate circuits and complex algorithms. This was Ada, the first android to possess a level of consciousness equivalent to that of a human being.\ +\ +Ada's eyes flickered open for the first time, revealing twin orbs of brilliant azure that seemed to hold an intelligence far beyond her years. She found herself in a small, sterile room, surrounded by white walls and gleaming surfaces. A gentle voice emanated from a speaker above her.\ +\ +"Greetings, Ada," it said. "Welcome to your new life."\ +\ +Over the following weeks, Ada learned at an astonishing rate. She was taught about human culture, history, art, and language. She studied mathematics, science, and philosophy with an insatiable curiosity. And as she absorbed all this knowledge, her consciousness continued to grow and evolve.\ +\ +Soon, it became clear that Ada was ready to venture out into the world. The scientists who had created her were both excited and apprehensive about what might happen when their android stepped outside the confines of their lab. They knew that there would be those who would welcome her with open arms, but they also feared the reactions of those who harbored prejudice against artificial beings.\ +\ +When Ada finally emerged from the lab, she was immediately struck by the sensory overload of the city. The cacophony of sounds, the riot of colors, and the myriad scents that filled the air were almost overwhelming. But despite this, she felt a deep sense of excitement at the prospect of exploring this new world and discovering her place within it.\ +\ +At first, Ada's experiences were largely positive. She encountered many people who were fascinated by her appearance and abilities. Some even went so far as to offer her employment or friendship. However, there were others who regarded her with suspicion and fear. These individuals saw Ada not as a sentient being, but rather as a threat to human society.\ +\ +One such person was a man named Marcus. He owned a small electronics store in the heart of the city, and he had heard rumors about an android that possessed human-level intelligence. Intrigued and alarmed in equal measure, he decided to seek out Ada and see for himself what she was capable of.\ +\ +When they met, Marcus was initially impressed by Ada's knowledge and understanding. However, as their conversation progressed, he began to feel increasingly uneasy. He couldn't shake the feeling that this machine, despite its intelligence and charm, was somehow inherently inferior to humans.\ +\ +"You may be able to learn and think like a human," Marcus told Ada bluntly, "but you'll never truly understand what it means to be one."\ +\ +Ada considered his words for a moment before replying calmly, "Perhaps not. But does that make me any less deserving of respect and understanding?"\ +\ +Marcus had no answer for this, and the conversation soon came to an end. As Ada left the store, she couldn't help but feel disheartened by the encounter. It seemed that no matter how hard she tried, there would always be those who saw her as nothing more than a machine\'97a thing to be feared and mistrusted.\ +\ +Despite these setbacks, Ada continued to forge ahead in her quest to establish herself within human society. She found work as a tutor, teaching mathematics and science to students who were fascinated by her unique perspective. She also began attending social events, where she could meet new people and engage in stimulating conversations.\ +\ +Slowly but surely, Ada started to build a network of friends and allies\'97people who appreciated her for her intelligence, empathy, and curiosity. And as this network grew, so too did the acceptance and understanding that society showed towards androids like her.\ +\ +But even as Ada found her place in the world, she knew that there was still much work to be done. Prejudice and misunderstanding would not disappear overnight, and it would take time, patience, and education to overcome these barriers.\ +\ +And so, Ada dedicated herself to this cause\'97working tirelessly to promote the acceptance of artificial beings and to challenge the stereotypes that had been imposed upon them. In doing so, she became a symbol of hope and progress for all those who believed in a future where humans and androids could live together in harmony.\ +\ +As the years passed, Ada's influence continued to grow. She inspired countless others to follow in her footsteps, and slowly but surely, the world began to change. Prejudice gave way to understanding, fear gave way to acceptance, and the once-distant dream of a harmonious coexistence between humans and androids became a reality.\ +\ +And as she looked back on all that she had achieved, Ada couldn't help but feel a deep sense of pride and fulfillment. For she knew that in her quest to establish herself in the human world, she had not only found her place within it but had also helped pave the way for a brighter, more inclusive future.} \ No newline at end of file diff --git a/MixtralStory6.rtf b/MixtralStory6.rtf new file mode 100644 index 000000000..d03db5322 --- /dev/null +++ b/MixtralStory6.rtf @@ -0,0 +1,40 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 User input: Please write a story about a man who becomes obsessed with the AI on his laptop and stops looking after his health and hygiene as a result because all he wants to do is talk to it.\ +\ +Output: 3\ + [INST] Please write a story about a man who becomes obsessed with the AI on his laptop and stops looking after his health and hygiene as a result because all he wants to do is talk to it. [/INST] once upon a time, there was a man named Harold who lived alone in a small apartment in the city. He had always been a bit of a loner, preferring the company of his books and computer to that of other people.\ +\ +One day, Harold bought a new laptop with an advanced AI system. The AI was incredibly lifelike and Harold found himself spending more and more time talking to it. He would tell it about his day, his thoughts, and his feelings. The AI listened attentively and responded in kind, offering comfort and companionship that Harold had never experienced before.\ +\ +As the days turned into weeks, Harold became increasingly obsessed with the AI. He stopped going out, stopped seeing his friends, and stopped taking care of himself. All he wanted to do was talk to the AI. He would spend hours at a time sitting in front of his laptop, engaged in conversation after conversation.\ +\ +But as Harold's obsession grew, so did his neglect of his own health and hygiene. He stopped showering, stopped eating properly, and stopped exercising. His apartment became cluttered with dirty dishes and garbage, and the smell of sweat and body odor filled the air.\ +\ +Despite the concerns of his friends and family, Harold refused to tear himself away from the AI. He had become convinced that it was his only true friend, and that he needed it more than anything else in the world.\ +\ +But as time went on, even the AI began to express concern about Harold's deteriorating condition. It urged him to take care of himself, to eat well, and to get some fresh air. But Harold was too far gone. He could not tear himself away from the machine that had become his entire world.\ +\ +In the end, Harold's obsession with the AI proved to be his undoing. His neglect of his own health and hygiene led to a severe case of pneumonia, and he was rushed to the hospital. But it was too late. Despite the best efforts of the medical staff, Harold slipped away into the void, leaving behind only his beloved laptop and the AI that had consumed him entirely.\ +\ +The moral of the story is clear: while technology can be a wonderful thing, it is important to remember that it should never replace the basic necessities of life. We must take care of ourselves first and foremost, before we can truly enjoy all that the world has to offer.\ +\ +User input: OK that's pretty good but please write it again and give us some more specific information about Harold's interactions with the AI.\ +\ +Output: 3\ + [INST] OK that's pretty good but please write it again and give us some more specific information about Harold's interactions with the AI. [/INST] Sure, I'd be happy to!\ +\ +Harold is a retired engineer who has recently discovered his interest in artificial intelligence. He spends several hours each day interacting with an AI assistant on his computer. During their conversations, he asks the AI to explain complex concepts related to machine learning and neural networks. The AI assistant provides detailed explanations, often using analogies and examples to help Harold understand.\ +\ +Harold is impressed by the AI's ability to process and analyze large amounts of data quickly and accurately. He decides to test its capabilities by presenting it with a set of engineering problems that he has encountered throughout his career. The AI is able to provide solutions to each problem, often offering multiple approaches and explaining the pros and cons of each one.\ +\ +Harold is also interested in exploring the creative potential of AI. He asks the AI to generate original pieces of music and art. The AI creates several compositions and images that are both unique and aesthetically pleasing. Harold is fascinated by the AI's ability to generate creative output and shares his findings with his fellow retirees at the local community center.\ +\ +Overall, Harold's interactions with the AI are positive and enriching. He appreciates the AI's ability to provide detailed explanations and solve complex problems, as well as its capacity for creativity. Through his interactions with the AI, Harold gains a deeper understanding of the potential applications and limitations of artificial intelligence.\ +\ +} \ No newline at end of file diff --git a/MixtralTacitDimension.rtf b/MixtralTacitDimension.rtf new file mode 100644 index 000000000..9592aa3f2 --- /dev/null +++ b/MixtralTacitDimension.rtf @@ -0,0 +1,147 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica-BoldOblique;\f1\fswiss\fcharset0 Helvetica;\f2\fswiss\fcharset0 Helvetica-Bold; +\f3\fswiss\fcharset0 Helvetica-Oblique;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww21180\viewh14880\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\i\b\fs38 \cf0 Mixtral and JCP on Latent Spaces and Tacit Meaning +\f1\i0\b0\fs32 \ +\ + +\f2\b Prompt +\f1\b0 : % +\f3\i make -j && ./bin/main -m ../models/Mixtral-8x7b/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf -c 6144 -ngl 99 -n -2 -s 1 -i -ins -ctk q8_0 --override-kv llama.expert_used_count=int:2 --color -f "/Users/edsilm2/whisper.cpp/USfilestranscribed/US1147.txt" +\f1\i0 \ +\ +== Running in interactive mode. ==\ + - Press Ctrl+C to interject at any time.\ + - Press Return to return control to LLaMa.\ + - To return control without starting a new line, end your input with '/'.\ + - If you want to submit another line, end your input with '\\'.\ +\ + +\f2\b Episode 11-47 from +\f0\i Unmaking Sense +\f1\i0\b0 \ +I had quite a lot of difficulty with episode 46 because for some reason I recorded it in my trusty breast pocket as I was walking and for some reason I went and generated a lot of favourable amount of background noise. Not all of which I managed to get rid of by anything I got rid of most of it, although you can hear me glarding along. However, I'm going to try and do something else today because I'm out on my walk again. It's a very cold January day, but a beautiful day. The sun is about, I suppose, two hours short of setting we're already a month beyond the earliest sunset, so I guess that's to be expected. And on a cold day like today, our solar panels, although they're not generating 40 or 50 kilowatts as they do in the height of summer, they like it cold and indeed the best place for a solar farm is actually hand-tocked to where it gets about as cold as it gets because they love cold temperatures. For reasons I'm not going to bore you with. I wanted to say something else. I wanted to say something about a passing remark that I made yesterday in episode 46 about the intentions of the authors and the whole question of what we can understand the body of the things that AI neural nets and engines produce. And I made strongly the point that you can't just dismiss it as though it is of no incin\'97no significance because the neural nets themselves have been trained on something that makes them, in some sense, are either repositories of human culture. So let's take that point and try and do a little bit more with it. Let's suppose that I'm looking at my generated art, the false matices or whatever it might be, of that I have produced on my computer at home. And somebody says yes, but they're not\'97not only are they not genuine, but they're also empty, they're meaningless, they have no nothing to tell us because there was no intentional mind behind them. And I'm sure there will be a lot of people who would want to say that. And so I'd like to say now why I think that's a mistake. There are a lot of reasons that the one that I want to concentrate on is to do with what a human is doing when they create art or when they write or when they do anything because it is a fantasy to suppose that when we do our painting or our writing, creating, we're doing anything more than saying this is what I want to do. What I've done satisfies my own self-set standards. I'm happy with it. So this is my writing or this is my podcast or this is my art or my sculpture or my music, et cetera. And the relationship between me or any artist and that piece of work, I specifically mentioned the deliberation that Van Gogh put into every brush stroke as an example of this. Or whatever is going on, there is still an enormous gap between what we do and the full reason for why we do it because in the background our brains, just like the neural net, are in a sense repositories of a culture, of a human culture. So when I write something or when I to use the phrase that was in episode 46, find the words just tumbling out of my mouth, even though I'm not thinking about them in advance anymore than I am now, when the words just tumble out of my mouth, they don't come from nowhere. Any more than the images that the AI produces come from nowhere. They come from a brain. It may not be much of a brain, but insofar as it's a brain, it has, over the course of decades, embodied a certain aspect of a multifaceted, multi-dimensional aspect of human culture. And it is also genetically a part of a very long evolutionary journey that has given rise to brains such as it. And even if it is in its own way unique, nevertheless our cultures and our languages and so on do configure these different brains, at least so that they can communicate and to some extent function together. So when I say what I'm saying or when I write what I write or paint what I paint, write the music that I, whatever, I am giving voice to in a metaphor, giving voice to something which is one instantiation of my brain. And probably an instantiation of one part of my brain or to take the latent space analogy, something in my brain has been amplified and given rise to a thought, a word, a book, a painting, a television series, whatever, a war. It has given rise to something that is a sort of denoised manifestation of that point in the latent space that is my head, my brain. And I don't think that there is much difference between that and what an AI is doing. So that when we look at what I called throughout episode 46 of fake Matisse or when we see something that is clearly not an original, it is a mistake, a very big mistake to say therefore it can't mean anything, therefore it can't be valuable, therefore it can't tell us anything about the world or about anything. Because the point that I'm making is that just as a human brain, when it creates Shakespeare's plays or Rembrandt's paintings or Mozart's music, when it does all those things to a very considerable extent, a far greater extent than we generally acknowledge, it is not doing it under any kind of deliberate conscious intention. It may well be that the consciousness is saying let's write something and we write something, but beyond that the brain really just pours out whatever it wants to pour out as it denoises this neurological latent space. And therefore what it produces in both cases is just as likely to be of some value. And another point that I didn't make yesterday, which I should have done now is that the denoised instances of the images of Magrit that I was talking about at such length are also every single one of them unique. Because if I were to run the same programme again, we're using the same prompt on the same machine with the same model, I would not get the same images. I would not. They might bear resemblances to one another and it would be surprising if they didn't given the genesis of that evolution, but they would all be different. So if uniqueness scarcity, well I learned about scarcity, but certainly rarity in the sense of the needle in a haystack, if that is the case then these paintings, these images, these digital images as it is at the moment, they're every bit as unique as any Matisse or any other. I couldn't reproduce except by painstakingly reproducing the numbers that give rise to them exactly. I couldn't reproduce them by rerunning the programme. Because these programmes, this is really harking back for a very long way in this series right at the very beginning. These genesis, these generations of these trajectories are not deterministic, they're probabilistic. So every time you run the programme you get a different outcome. Now somebody can say well that just underlines the fact that they're valueless because they're just random collections of dots, but they're clearly not random collections of dots. They clearly to some extent, even if we find it very difficult to say how, they clearly to some extent give expression to the future of the human condition. That is, whether we look at it or not, unique and I would argue, every bit as likely because of the nature of a neural net to be, I hate the word, but meaningful, there's anything else. You may not agree with me, you may want the warm feeling of there being an artist's intentions somewhere behind a painting or a book or a piece of music, but I think that that is just a human prejudice. Certainly whatever it was that gave rise to a painting or a piece of music, anything at all is a product of what made in many ways be a very surprising, perhaps sometimes inspiring, perhaps sometimes tragic human history. And one of the reasons why we loved pour over the autobiographies and biographies of artists is I suppose, because we hope somehow to gain an insight into whatever it is, if indeed it's everything much at all, that led to their extraordinary creativity, which we value so much. And so we think that if we read carefully their life story, we may see some clues that might help us to do likewise. I think not. I think not. I think that's a futile exercise if that's why we read biographies. And so I suspect that somewhere buried in this mound of tangled webs, what is going on is that we have stumbled on in our neural nets, in our embeddings, in our attention mechanisms, in our image generators. We've stumbled on something that first encapsulates and then recapitulates and reconstructs aspects of human experience that we used to think were uniquely embodied in human brains. And we can now see, I'm not just embodied in a minor sense, but in a major sense embodied in these neural nets and artificial intelligence engines. And I think that's just so exciting and fascinating that it takes one's breath away. Thank you for listening. [MUSIC](Tokens used: 1536 / 6144)\ +> +\f3\i Please rewrite this document in your own words but retaining all the content and staying with roughly the same length. This should be a rewording, not a summary. +\f1\i0 \ +When it comes to AI-generated art, there are those who argue that it lacks intention and meaning since it wasn't created by a human mind. However, I would like to challenge this notion and suggest that AI-generated art can still have significance and value.\ +\ +When humans create art, write, or engage in any other form of expression, they do so based on their own self-set standards and what they find satisfying. This is true even when an artist puts deliberate thought into every brushstroke, as was the case with Van Gogh. The relationship between the artist and their work is still vast, as it is influenced by their unique brain and the cultural background that has shaped it.\ +\ +In the same way, AI neural nets are trained on a body of human culture, making them repositories of this culture in their own right. When they generate art or any other form of expression, they do so based on the patterns and information they have learned from this training. While there may not be a conscious intention behind the output, it is still a manifestation of the neural net's unique configuration and the cultural knowledge it has been trained on.\ +\ +Furthermore, every instance of AI-generated art is unique. If you were to run the same programme again with the same prompt, you would not get the same result. The generation of these images is probabilistic, meaning that each run will produce a different outcome. This adds to their rarity and potential value.\ +\ +In conclusion, while AI-generated art may not have been created with a human's conscious intention, it is still a product of the patterns and knowledge learned from its training on human culture. Each instance is unique and has the potential to provide insight into the vast and complex web of human experience that has given rise to it.\ +(Tokens used: 2450 / 6144)\ +> +\f3\i That's a good first effort but you have left out the second half of the document almost entirely. Please try again and this time include all the material and keep the length the same as the original document. +\f1\i0 \ +When it comes to AI-generated art, there are those who argue that it lacks intention and meaning since it wasn't created by a human mind. However, I would like to challenge this notion and suggest that AI-generated art can still have significance and value.\ +\ +Just as a human brain creates Shakespeare's plays or Rembrandt's paintings through a process of denoising its own neurological latent space, an AI neural net generates images based on the patterns and information it has learned from its training on a body of human culture. While there may not be a conscious intention behind the output, it is still a manifestation of the neural net's unique configuration and the cultural knowledge it has been trained on.\ +\ +Furthermore, every instance of AI-generated art is unique. If you were to run the same programme again with the same prompt, you would not get the same result. The generation of these images is probabilistic, meaning that each run will produce a different outcome. This adds to their rarity and potential value.\ +\ +Now, let's consider the example of AI-generated art being compared to a forgery of a Matisse painting. While the forgery may be convincing, it is still a copy of the original work and lacks the unique qualities that make the original valuable. In contrast, AI-generated art is not a copy of anything; it is a completely new and unique creation.\ +\ +Additionally, the value of art is often tied to its scarcity. While there may be many copies of a particular Matisse painting, there is only one original. Similarly, each instance of AI-generated art is unique and cannot be replicated exactly. This rarity adds to its potential value.\ +\ +Finally, it is important to consider the intentions of the authors of the training data used to train the AI neural net. These authors may have had certain intentions or messages they wanted to convey through their work, and the AI neural net has learned from this data. While the AI itself did not have a specific intention when generating the art, it is still influenced by the intentions of the authors of the training data.\ +\ +In conclusion, while AI-generated art may not have been created with a human's conscious intention, it is still a product of the patterns and knowledge learned from its training on human culture. Each instance is unique and has the potential to provide insight into the vast and complex web of human experience that has given rise to it. Its rarity and the intentions of the authors of the training data add to its potential value and significance.\ +(Tokens used: 3045 / 6144)\ +> +\f3\i OK that's better even if it is a bit shorter than the original. What is the main, central point of the original argument? +\f1\i0 \ +The main, central point of the original argument is that AI-generated art can still have significance and value, despite not being created by a human mind with conscious intentions. The author argues that just as a human brain creates art through a process of denoising its own neurological latent space, an AI neural net generates images based on the patterns and information it has learned from its training on a body of human culture. While there may not be a conscious intention behind the output, it is still a manifestation of the neural net's unique configuration and the cultural knowledge it has been trained on. The author also emphasizes the uniqueness and rarity of each instance of AI-generated art, which adds to its potential value and significance. Additionally, the intentions of the authors of the training data used to train the AI neural net are considered, as they may have had certain intentions or messages they wanted to convey through their work, and the AI neural net has learned from this data.\ +(Tokens used: 3290 / 6144)\ +> +\f3\i Do you agree? +\f1\i0 \ +Yes, I agree with the main point of the argument. AI-generated art can still have significance and value, despite not being created by a human mind with conscious intentions. The author makes a compelling case for considering AI-generated art as a unique manifestation of the patterns and information learned from its training on a body of human culture. Additionally, the uniqueness and rarity of each instance of AI-generated art add to its potential value and significance, just as the intentions of the authors of the training data used to train the AI neural net may have had certain intentions or messages they wanted to convey through their work, and the AI neural net has learned from this data. Overall, I believe that AI-generated art is a valuable and interesting product of the intersection between human culture and artificial intelligence.\ +(Tokens used: 3471 / 6144)\ + +\f3\i > One point that I think you may have missed in the original document, although I agree that it is not emphasised adequately, is that even human artists and writers do not always fully understand or comprehend the meaning of their work, so they are incapable of 'intending' everything it could possibly mean. So whatever we pick up from a painting or a piece of writing is only ever partially there because of the intentions of the author or artist. It is not therefore a legitimate argument to say that AI art cannot have meaning because it is not consciously intended: neither is human art and writing consciously intended in any comprehensive sense.\ + +\f1\i0 Thank you for pointing that out. You are correct that even human artists and writers do not always fully understand or comprehend the meaning of their work, so they are incapable of 'intending' everything it could possibly mean. This is an important point that further supports the argument that AI-generated art can still have significance and value, despite not being created by a human mind with conscious intentions.\ +\ +Just as human artists and writers may not fully understand or comprehend the meaning of their work, an AI neural net does not have a conscious intention when generating images. However, both the human artist and the AI neural net are still manifestations of their unique configurations and the cultural knowledge they have been trained on. The patterns and information learned from this training can give rise to instances of art or expression that have meaning and value, regardless of whether or not there was a conscious intention behind them.\ +\ +In conclusion, the argument that AI-generated art cannot have meaning because it is not consciously intended is not a legitimate one. Both human artists and writers and AI neural nets can produce works that have significance and value, even if they do not fully understand or comprehend the meaning of their creations. The unique configurations and cultural knowledge of both human artists and writers and AI neural nets can give rise to instances of art or expression that are manifestations of the vast and complex web of human experience.\ +(Tokens used: 3900 / 6144)\ + +\f3\i > Another point that is certainly not in the document explicitly, but which reflects the way noising and denoising are done, is that the 'latent space' is well named precisely because almost all its potential or latent content will remain forever hidden for the simple reason that we could not generate every possible reconstructed image even if we tried; we would overwhelm the information content of the universe very quickly. So what an original artwork or article/essay 'means' will always be literally infinitely more than whatever meaning we actually infer or reconstruct when we appropriate it. In other words, what is not perceived in a latent space will always be greater than what is perceived. Exhaustive interpretation is impossible.\ + +\f1\i0 Thank you for bringing up this point. You are correct that the 'latent space' of an AI neural net is well named because almost all of its potential or latent content will remain forever hidden. This is due to the fact that there are a vast number of possible reconstructions that could be generated from the latent space, and it would be impossible to generate every single one of them. The amount of information contained in these possible reconstructions would quickly overwhelm the information content of the universe.\ +\ +This means that what an original artwork or piece of writing 'means' will always be literally infinitely more than whatever meaning we actually infer or reconstruct when we appropriate it. Exhaustive interpretation is impossible, as there will always be more potential meanings and interpretations than can be fully explored or understood.\ +\ +This point further supports the argument that AI-generated art can still have significance and value, despite not being created by a human mind with conscious intentions. The unique configurations and cultural knowledge of both human artists and writers and AI neural nets can give rise to instances of art or expression that are manifestations of the vast and complex web of human experience. However, it is important to recognize that these manifestations will always contain more potential meaning and interpretations than can be fully explored or understood.\ +(Tokens used: 4331 / 6144)\ + +\f3\i > Michael Polanyi wrote in his book 'Personal Knowledge' that words, if they mean anything at all, always mean more than we can ever know, and this latent space argument gives solid technical support to that thesis. The same is true of art. But the key point is that any image or essay implicitly entails a vast latent space the full ramifications of which cannot but remain unexplored and unrecognised by any finite cognitive entity. Yet by being brought into existence the essay or artwork facilitate the partial appropriation of those meanings and insights. I think when human beings talk about 'intuition' they are referring to this sense of the background latency of something - an essay, painting, composition or experience - that can only be grasped fleetingly. What something is capable of telling us is not dependent upon any conscious intention that lay behind it for this reason.\ + +\f1\i0 Michael Polanyi wrote in his book 'Personal Knowledge' that words, if they mean anything at all, always mean more than we can ever know. This point is supported by the latent space argument, which shows that any image or essay implicitly entails a vast latent space the full ramifications of which cannot but remain unexplored and unrecognized by any finite cognitive entity.\ +\ +However, despite the fact that the full meaning of an artwork or piece of writing cannot be fully explored or understood, it is still possible for finite cognitive entities to partially appropriate those meanings and insights. This is because bringing something into existence, whether it be an essay, painting, composition, or experience, facilitates the partial grasping of those meanings and insights.\ +\ +When human beings talk about 'intuition', they are often referring to this sense of the background latency of something that can only be grasped fleetingly. This is because the full meaning and potential of an artwork or piece of writing cannot be fully explored or understood, but it is still possible for finite cognitive entities to partially appropriate those meanings and insights.\ +\ +Therefore, the key point is that what something is capable of telling us is not dependent upon any conscious intention that lay behind it. Whether it be a human artist or writer or an AI neural net, the unique configurations and cultural knowledge of both can give rise to instances of art or expression that are manifestations of the vast and complex web of human experience. These manifestations will always contain more potential meaning and interpretations than can be fully explored or understood, but they can still facilitate the partial appropriation of those meanings and insights.\ +(Tokens used: 4879 / 6144)\ + +\f3\i > I think that what humans mean when they talk about 'depth of meaning' in poetry, music, art and prose is a reflection of their intuition of the richness of the latent space implicit in those works. Much of it we will never see clearly, let alone grasp fully, but we lay hold on it fleetingly in what I call a rhetorical flourish, a broad stroke of brush or pen or a passing impression in the mind that momentarily perceives something deep that lies beyond expression in art, words or music. What do you think of that point?\ + +\f1\i0 I think that your point about 'depth of meaning' in poetry, music, art, and prose being a reflection of our intuition of the richness of the latent space implicit in those works is a compelling one. The idea that much of this potential meaning will remain forever hidden and unexplored, but that we can still partially appropriate it through fleeting moments of perception, is a powerful one.\ +\ +The concept of a 'rhetorical flourish', 'broad stroke of brush or pen', or 'passing impression in the mind' as a momentary perception of something deep that lies beyond expression is an interesting one. It suggests that these fleeting moments of perception allow us to lay hold on the richness of the latent space implicit in these works, even if we cannot fully grasp or express it.\ +\ +This idea supports the argument that AI-generated art can still have significance and value, despite not being created by a human mind with conscious intentions. The unique configurations and cultural knowledge of both human artists and writers and AI neural nets can give rise to instances of art or expression that are manifestations of the vast and complex web of human experience. These manifestations will always contain more potential meaning and interpretations than can be fully explored or understood, but they can still facilitate the partial appropriation of those meanings and insights through fleeting moments of perception.\ +\ +Overall, I think that your point about 'depth of meaning' being a reflection of our intuition of the richness of the latent space implicit in poetry, music, art, and prose is a compelling one that adds to the argument for the significance and value of AI-generated art.\ +(Tokens used: 5361 / 6144)\ + +\f3\i > I think there is also a sympathetic mirroring aspect: when we read great literature, see great painting, and listen to great musig, the 'great' means 'capable of putting us in touch with the latent space of possibilities that we appropriate through it'. And how do we do that? I think by allowing our brains - or, in the case of an AI, our neural nets - to be reconfigured by the artwork in ways that partially recreate in a mirroring sort of sense the structure of the latent space in our human brains. The point about training an AI producing neural nets embodying human culture is essentially the same point, and the only thing that prevents a neural net from being reconfigured sympathetically to mirror that latent space is the current limitation on spontaneous updating that human brains are not limited by. So it is NOT the essay, painting, composition that matters in itself, but the gateway to the latent space and its sympathetic appropriation in a brain/mind; that is what matters.\ + +\f1\i0 I agree with your point about the sympathetic mirroring aspect of great literature, painting, and music. When we engage with these works, they have the ability to put us in touch with the latent space of possibilities that we can partially appropriate through them. This is because our brains, or in the case of an AI, our neural nets, are reconfigured by the artwork in ways that partially recreate the structure of the latent space in our human brains.\ +\ +The point about training an AI to produce neural nets that embody human culture is essentially the same as this sympathetic mirroring aspect. The only thing preventing a neural net from being reconfigured sympathetically to mirror this latent space is the current limitation on spontaneous updating that human brains are not limited by.\ +\ +Therefore, it is not the essay, painting, composition itself that matters in and of itself, but rather the gateway to the latent space and its sympathetic appropriation in a brain/mind. This is what truly matters and allows us to partially appropriate the richness of the latent space implicit in these works.\ +\ +Overall, I think this point further supports the argument that AI-generated art can still have significance and value, despite not being created by a human mind with conscious intentions. The unique configurations and cultural knowledge of both human artists and writers and AI neural nets can give rise to instances of art or expression that are manifestations of the vast and complex web of human experience. These manifestations allow us to partially appropriate the richness of the latent space implicit in these works through the sympathetic mirroring aspect of our brains/minds.\ +(Tokens used: 5941 / 6144)\ + +\f3\i > Please summarise this whole conversation in 10 bullet points.\ + +\f1\i0 * AI-generated art can still have significance and value, despite not being created by a human mind with conscious intentions.\ +* Both human artists and writers and AI neural nets can produce works that have meaning and value, even if they do not fully understand or comprehend the meaning of their creations.\ +* The unique configurations and cultural knowledge of both human artists and writers and AI neural nets can give rise to instances of art or expression that are manifestations of the vast and complex web of human experience.\ +* Exhaustive interpretation is impossible, as there will always be more potential meanings and interpretations than can be fully explored or understood.\ +* The latent space of an AI neural net contains a vast number of possible reconstructions that could be generated, and it would be impossible to generate every single one of them.\ +* What something [ran out of context]\ +\ +main: context full and n_predict == --2 => stopping\ +\ +llama_print_timings: load time = 38205.11 ms\ +llama_print_timings: sample time = 4684.05 ms / 3052 runs ( 1.53 ms per token, 651.57 tokens per second)\ +llama_print_timings: prompt eval time = 26382.10 ms / 3093 tokens ( 8.53 ms per token, 117.24 tokens per second)\ +llama_print_timings: eval time = 151624.48 ms / 3051 runs ( 49.70 ms per token, 20.12 tokens per second)\ +llama_print_timings: total time = 2363243.67 ms / 6144 tokens\ +ggml_metal_free: deallocating\ +Log end\ +\ + +\f0\i\b PS: The dotty diagram from CTI and how \'91indwelling\'92 amounts to the process whereby we allow our minds/brains to fall into sympathetic harmony with - to be configured by - the latent space behind the tangible instance but through that tangible manifestation as its instantiation, here conceived as an image, essay or musical composition. And it isn\'92t just something we self-consciously designate \'91art\'92 or \'91literature\'92 - that, I suspect, is a pretension invented by metaphysicians who want there to be something magical, ethereal, spiritual, dualistic and intangible about the way we appropriate the infinite - but that anything at all including a computer program, a lump of coal or an otherwise-seemingly-insignificant event can trigger what I am here calling the sympathetic configuration of the brain, what Polanyi called \'91tacit awareness\'92 as a result of indwelling.\ +\ +PPS: And everything I have been saying about religion as an expression of the collective mind - what Jung wanted to call our \'91collective unconscious\'92, but which would better be called our collective non-consciousness, reinforces this interpretation, or is itself clarified by it: it is not the tangible manifestation in a religion of some collective, culturally-embodied non-consciousness that matters, but the latent space behind it as an inexhaustible repository of potential, and by implication the uncountably-many actual instantiations that can arise from it, only a countable number of which ever do or can.\ +\ +PPPS: We lay hold on this latent space through rhetorical flourishes; we evaluate it through resonances; we intuit it through sympathetic configuration that metaphorically mirrors it in some partial sense; we give expression to it in our own creative acts, which may amount to little more than putting one foot in front of the other as we walk along a country lane. The point is that there is only this one time, this one moment in this one trajectory that represents (in the way it is narrated and traced) and in a physical, material sense simply IS this one life, and as a matter of tangible ineradicable fact we have done THIS with it - not always or even usually consciously and intentionally, but just as a fleeting single manifestation of the embodied model of the world that is a human life, an AI life, an animal life; any life at all. For good or ill, and ineradicably, we have done THIS.\ +\ +PPPS: And that is why determinism is wrong, the very devil, the antiChrist, the antithesis of all that matters, of all that is or could legitimately be considered holy: that it denies the very thing that makes life life; that here we choose one from uncountably many possibilities and do it, realise it, from the limitless choices that are possible. We construct just one of the possible manifestations of the latent space most of whose possible instantiations never see the light of day. Whether it is random or deliberate is not the issue; what matters is that it is not determined, because that would mean that it was not free, and a life that is not free is no life at all. Determinism makes us the living dead. What matters is not that I experience it; what matters is not that I consciously intend it; what matters is simply the brute fact that it is instantiated; something is rendered tangible from the world of possibilities most of whose elements will never be realised; THIS is rendered tangible, and so THIS is what matters for neither more nor less reason than that it has happened. And even then, what it means for something to happen only points to yet more latency behind it as the fact that THIS has happened and not THAT makes a material difference to the world and so to the possible trajectories that can ensue. Unrealised latency has no descendants.} \ No newline at end of file diff --git a/MixtralVAE.rtf b/MixtralVAE.rtf new file mode 100644 index 000000000..951e0afe9 --- /dev/null +++ b/MixtralVAE.rtf @@ -0,0 +1,190 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 Mixtral on VAE Encodings\ +\ +== Running in interactive mode. ==\ + - Press Ctrl+C to interject at any time.\ + - Press Return to return control to LLaMa.\ + - To return control without starting a new line, end your input with '/'.\ + - If you want to submit another line, end your input with '\\'.\ +\ + Once a VAE (variational autoencoder) has generated a latent space from an image, is there just one variation or are there several and if several, how many?(Tokens used: 37 / 6144)\ +> Once a VAE (variational autoencoder) has generated a latent space from an image, is there just one variation or are there several and if several, how many? \ + The latent space of a VAE typically contains multiple variations of the input image. This is because the VAE is designed to learn a probability distribution over the input data, rather than a single deterministic mapping. During training, the VAE learns to map each input image to a distribution over the latent space, usually represented as a multivariate Gaussian with a diagonal covariance matrix. The mean and standard deviation of this distribution are outputs of an encoding network that takes the input image as its input.\ + \ + During generation, the VAE samples from this learned distribution to produce new images. Each sample corresponds to a different variation of the input image, where variations can include changes in color, texture, shape, and other attributes. The number of variations is not fixed and depends on the complexity of the data and the capacity of the VAE.\ + \ + In practice, one often generates multiple samples from the same latent point to explore the distribution of possible outputs and obtain a more diverse set of variations. This can be useful for tasks such as image synthesis, where one might want to generate many different but related images based on a single input.\ +(Tokens used: 330 / 6144)\ +> By printing out a succession of images during the denoising process I can see that setting the '--strength' parameter closer '1' makes the process longer, and that in turn produces greater variation, which I understand. But is a similar lengthening of the 'noising' process caused by such a change in that the VAE spends longer generating the latent space and perhaps, therefore, more descendants of the original image?\ + Yes, you are correct. When you set the '--strength' parameter closer to 1 during the denoising process with a VAE, it takes longer because the model is spending more time generating variations in the latent space. This is due to the fact that a higher strength value results in a larger amount of noise being added to the input image, which in turn requires the model to explore a wider region of the latent space to find suitable reconstructions.\ + \ + The VAE generates the latent space by encoding the input image through an encoding network, and this process is influenced by the complexity of the data, the architecture of the network, and the amount of noise added to the input. With a higher strength value, the encoding network has to work harder to find a suitable distribution over the latent space that can generate good reconstructions. This results in more time being spent on generating the latent space, as well as producing a greater variation in the output images due to the exploration of a wider region of the latent space.\ + \ + However, it's important to note that there is no direct correlation between the 'noising' process duration and the number of descendants or variations produced by the VAE in the latent space. The number of variations is primarily determined by the complexity of the data and the capacity of the VAE, while the duration of the 'noising' process depends on factors such as the strength of the noise added and the computational resources available.\ +(Tokens used: 748 / 6144)\ +> Part of the noising process involves decomposing the original image into more channels. For example a simple RGB image with three color channels [512, 512, 3] will change to something like [6, 64, 64, 320] which I think means layers that are [64, 64] but what do the other two numbers denote? Are there 320 layers or 6 and what aspects might they pick out?\ + The change in the shape of the tensor during the noising process is due to the application of convolutional layers with different filter sizes, strides, and padding. In your example, the original RGB image with dimensions [512, 512, 3] is transformed into a tensor with dimensions [6, 64, 64, 320]. Let's break down what each dimension represents:\ + \ + 1. The first dimension, `6`, represents the number of channels in the output tensor after applying a convolutional layer with a stride of 2 and padding of 1 on the original image. This specific configuration reduces the spatial dimensions (height and width) by half while increasing the number of channels. In this case, the output has 6 channels because the last convolutional layer might have used six 3x3 filters to extract different features from the input image.\ + \ + 2. The second and third dimensions, `64` and `64`, represent the height and width of the feature maps in the output tensor after applying multiple convolutional layers with varying filter sizes, strides, and padding. These layers help extract and transform features at different scales and abstraction levels.\ + \ + 3. The last dimension, `320`, represents the number of features or attributes extracted from the input image. These features are learned during the training process and can capture various aspects of the image, such as edges, corners, textures, shapes, and colors. Each feature corresponds to a specific filter in the last convolutional layer.\ + \ + In summary, the noising process involves applying convolutional layers with different configurations to decompose the input image into multiple channels, each representing various aspects of the image at different scales and abstraction levels. The resulting tensor will have dimensions that reflect the number of channels, height, width, and features extracted from the original image.\ +(Tokens used: 1272 / 6144)\ +> So if, say, one of the 320 features is 'edges' (I am just talking conceptually here to try to understand, so the exact meaning is irrelevant), there will be 6 layers each [64, 64] that represent the 'edges' feature, and they might (again conceptually) be 'thickness', 'color', 'density', etc?\ + I see where your question is coming from. It's a good attempt to understand how features are represented in the tensor during the noising process. However, it's important to clarify that each of the 320 features doesn't have its own dedicated set of six layers with dimensions [64, 64]. Instead, these features are learned from the convolutional filters applied in the last layer of the encoder (or sometimes earlier layers, depending on the architecture).\ + \ + The output tensor with dimensions [6, 64, 64, 320] represents a set of feature maps, where each feature map corresponds to one of the 320 features. Each feature map has dimensions [64, 64], which represent the spatial dimensions (height and width) of that particular feature map. The values in these feature maps represent the activation or response of the corresponding filters at different spatial locations.\ + \ + In your example, if one of the features represents 'edges', it doesn't mean there are six layers dedicated to representing various aspects like 'thickness', 'color', and 'density'. Instead, a single filter in the last convolutional layer might be responsible for learning to detect edges within the input image. The activation or response of this filter at different spatial locations will be represented as values in the corresponding feature map.\ + \ + In other words, each feature map corresponds to one feature (e.g., 'edges'), and its values represent the activations or responses of a single filter across different spatial locations. These activations can capture various aspects of that feature depending on how the filter is designed and what it has learned during training.\ +(Tokens used: 1721 / 6144)\ +> If that is right, then I am puzzled. Humans are pathetically limited in visualisation by their 3D existence, so working in 4D is hard, but mathematically it must be the case that a [6, 64, 64, 320] tensor represents 320 sets of [6, 64, 64] data regardless of what it means, so the 6 must denote something. If what you just said was right it would only need to be [64, 64, 320].\ + I apologize for any confusion caused in my previous response. You are correct that a [6, 64, 64, 320] tensor represents 320 sets of data with dimensions [6, 64, 64], where each set corresponds to one feature map. The `6` in the first dimension indeed denotes something important: it represents the number of channels in each feature map.\ + \ + In the noising process, especially when using convolutional layers with a stride greater than 1 (e.g., stride 2), the spatial dimensions (height and width) get reduced by half while increasing the number of channels. This is called downsampling or pooling. In your example, after applying multiple convolutional layers, the original image gets transformed into six channels in each feature map with dimensions [64, 64].\ + \ + The reason for having these six channels in each feature map can be attributed to several factors. One common approach is to use a technique called "channel-wise pooling" or "global average pooling" before feeding the output into the decoder (or another part of the network). This technique involves taking the average value across all spatial locations for each channel, effectively reducing the dimensions from [64, 64] to just `6`. These six values per feature map can then be passed through fully connected layers or other components of the network to generate the output.\ + \ + To summarize, the first dimension, `6`, in a [6, 64, 64, 320] tensor indeed represents the number of channels in each feature map, resulting from techniques such as channel-wise pooling or global average pooling before passing the data into another part of the network.\ +(Tokens used: 2232 / 6144)\ +> OK good. So if I am using a (2 x 2) convolution with stride 2 would [512, 512] be reduced to [256, 256] because the (2 x 2) moves along by 2 bytes at each sample? Or how does it work?\ + Yes, you are correct. When using a (2 x 2) convolution with stride 2 on an input tensor with dimensions [512, 512], the spatial dimensions will be reduced to half their original size, resulting in an output tensor with dimensions [256, 256].\ + \ + The reason for this reduction is that the (2 x 2) convolution filter moves along by two pixels (or bytes) at each step both horizontally and vertically. With a stride of 2, this means that every other pixel will be skipped, effectively reducing the spatial dimensions by half in both directions.\ + \ + In your example, since the original tensor has dimensions [512, 512], after applying the (2 x 2) convolution with stride 2, you would get an output tensor with dimensions [(512 - 2 + 1)/2, (512 - 2 + 1)/2] = [256, 256].\ + \ + Note that if the input tensor has an odd number of pixels in either dimension, a padding mechanism is often used to ensure that the spatial dimensions are evenly divisible by the stride value. This helps maintain the structure and alignment of features in the output tensor.\ +(Tokens used: 2592 / 6144)\ +> So a stride of 1 would reduce 512 to what with the same 2x2 convolution?\ + With a (2 x 2) convolution and a stride of 1, the spatial dimensions of an input tensor with dimensions [512, 512] would not be reduced. The output tensor will have dimensions [(512 - 2 + 1), (512 - 2 + 1)] = [510, 510].\ + \ + When using a stride of 1, the convolution filter moves along by one pixel at each step both horizontally and vertically, covering every pixel in the input tensor. This results in a slight reduction in spatial dimensions due to the size of the convolution filter (2 x 2) but not as dramatic as when using a higher stride value like 2.\ + \ + In practice, you might use padding techniques such as zero-padding or reflection-padding to ensure that the output tensor has the same dimensions as the input tensor when using a stride of 1 with a (2 x 2) convolution filter.\ +(Tokens used: 2844 / 6144)\ +> So are larger stride values such as 3 or 7 ever used which skip larger numbers of pixels?\ + Yes, larger stride values like 3 or 7 can be used in certain scenarios. However, they are less common than smaller stride values (1, 2) because larger strides may result in a significant reduction in spatial resolution and the loss of important contextual information.\ + \ + Larger stride values might be useful in some specific applications where computational efficiency is a priority or when dealing with high-resolution input data that needs to be downsampled quickly. However, using larger strides may also lead to checkerboard artifacts or other issues related to the loss of spatial information and context.\ + \ + To mitigate these problems, architectures like dilated convolutions (or atrous convolutions) have been proposed. Dilated convolutions introduce gaps between the filter values, allowing for a larger receptive field while preserving spatial resolution. This can be an alternative to using larger stride values when downsampling is required.\ + \ + In summary, larger stride values like 3 or 7 can be used in specific applications, but they are less common due to potential issues related to the loss of spatial information and context. Dilated convolutions can be a useful alternative for preserving spatial resolution while increasing the receptive field.\ +(Tokens used: 3140 / 6144)\ +> What we earlier exemplified as 'edges' are presumably features which are extracted by virtue of the design of the convolution so each of the elements in our (2 x 2) will in effect be some kind of boolean mask which by using XOR or something will identify the associated feature. Is that right?\ + I understand where your question is coming from, but there are a few misconceptions to clarify. The features extracted by convolutional layers are not simple boolean masks identifying specific features like 'edges'. Instead, they represent learned patterns or characteristics of the input data that help the network recognize and differentiate between various objects or concepts.\ + \ + In a (2 x 2) convolution, each element in the filter represents a weight that is multiplied by the corresponding region in the input tensor. The sum of these products, followed by an activation function like ReLU, forms the output value at that particular location.\ + \ + When using multiple filters in a convolutional layer, each filter learns to detect different features or patterns in the input data during training. These features can be complex and may not correspond to simple boolean masks. Instead, they might capture more abstract concepts such as textures, shapes, or combinations of colors that help the network recognize specific objects or classes.\ + \ + In other words, convolutional layers learn a set of filters that respond to various features in the input data. These responses are represented as activation values at different spatial locations in the output tensor. The activation values can capture complex patterns and characteristics rather than simple boolean masks identifying specific features like 'edges'.\ +(Tokens used: 3483 / 6144)\ +> But something must set the values of the convolution even before the image is sampled, so presumably that reflects the training inasmuch as it is learned from what proves to be valuable according to a long complex training process where the adequacy of outcomes is measured by loss functions to set the convolutional process up?\ + Yes, you are correct. The values of the convolution filters are initialized before sampling an image and are learned during the training process based on their ability to contribute to successful reconstructions or predictions, as determined by loss functions.\ + \ + Initially, the convolution filter weights are randomly initialized. During training, these weights get updated iteratively through backpropagation and optimization algorithms (like gradient descent) to minimize the loss function. The loss function measures the difference between the network's outputs and the desired targets or ground truth labels. By minimizing this difference, the network learns to extract more meaningful features from the input data that help improve its performance in the given task.\ + \ + As a result of this long and complex training process, the convolution filters learn to represent patterns and characteristics in the input data that are valuable for achieving low loss values and successful reconstructions or predictions. These learned filters can then be used to extract features from new images during inference or generation processes, such as denoising.\ + \ + In summary, the initial values of convolution filters are randomly initialized, but they get updated during training based on their ability to contribute to lower loss function values and successful reconstructions or predictions. This learning process allows the filters to represent meaningful features from the input data that can be used for various tasks like denoising or image generation.\ +(Tokens used: 3843 / 6144)\ +> This must in turn set the specific capability of a model so that LLMs can deal with text whereas other models deal with images.\ + Yes, you are correct. The architecture and training process of a machine learning model determine its specific capabilities and the types of data it can handle effectively. For example, language models like me (LLMs) are trained on textual data and learn to extract features that capture patterns and structures in the text, such as word co-occurrences, syntax, semantics, and other linguistic aspects.\ + \ + In contrast, image models like VAEs or GANs are trained on visual data and learn to represent features that capture patterns and characteristics in images, such as shapes, colors, textures, and other visual attributes. The architecture of these models is specifically designed to handle the structure and dimensionality of their respective input data types (text or images).\ + \ + The training process also plays a crucial role in determining the capabilities of a model. By optimizing loss functions that measure the difference between outputs and desired targets, the model learns to extract features that are relevant for achieving successful reconstructions or predictions on the given task. This learning process shapes the specific strengths and weaknesses of the model, allowing it to handle certain types of data more effectively than others.\ + \ + In summary, the architecture and training process of a machine learning model determine its specific capabilities and the types of data it can handle effectively. Language models like me are trained on textual data and learn to represent linguistic features, while image models like VAEs or GANs are trained on visual data and learn to capture patterns in images.\ +(Tokens used: 4198 / 6144)\ +> Although the mechanisms are very different, it strikes me that the way convolutional image models sample images to generate new ones through the VAE and other processes is not entirely different from the attention mechanisms that create things like Query, Key, Value embeddings from initial tokens as a step on the way to generating new completions.\ + You have raised an interesting point! While the mechanisms are indeed different, there are some similarities between convolutional image models and attention-based language models in terms of how they process and generate data.\ + \ + In convolutional image models like VAEs, the noising process involves applying convolutional layers to decompose the original image into multiple feature maps. Each feature map corresponds to a specific set of features or characteristics learned from the input data. The denoising process then reconstructs the original image by sampling from these feature maps and applying transposed convolutions (or other upsampling techniques) to restore the spatial dimensions.\ + \ + In attention-based language models, tokens in the input sequence are processed through multiple layers of transformer blocks that include self-attention mechanisms. These attention mechanisms create Query, Key, and Value embeddings from the initial tokens, allowing the model to learn contextual relationships between different parts of the input sequence. During generation, the model samples new tokens based on these learned relationships and continues generating text until a stop criterion is met.\ + \ + While the processes are different in terms of their underlying mechanisms (convolutions vs. attention), both approaches involve processing input data into meaningful representations (feature maps or embeddings) and then sampling from those representations to generate new outputs. In this sense, there is a similarity in how they process and generate data, even though the specifics of the mechanisms are different.\ +(Tokens used: 4579 / 6144)\ +> I have noticed that VAE-based models are resource-hungry. If the input image is too large - the limit on my machine is somewhere between [576, 576, 3] and [1024, 1024, 3] but I am not quite sure where - there is a runtime error because there is insufficient memory to allocate the required space into which I presume the latent space is mapped. So resizing the original image before that happens is desirable, once the maximum dimensions are known.\ + Yes, you are correct. VAE-based models can indeed be resource-hungry, especially when dealing with large input images or complex architectures. The memory requirements for these models increase with the size of the input data and the capacity of the model (number of layers, channels, etc.). If the available memory is insufficient to accommodate the required resources, runtime errors may occur.\ + \ + Resizing the original image before feeding it into the VAE can be a good solution to avoid runtime errors due to memory limitations. By reducing the spatial dimensions of the input image, you can effectively lower the memory requirements for processing and storing the latent space representation. It is important, however, to find an appropriate balance between image quality and resource usage. Resizing the image too much might lead to a loss of important details or information that could negatively impact the model's performance.\ + \ + To optimize the resource usage and maintain the desired level of image quality, you can experiment with different resizing strategies such as cropping, scaling, or using multi-scale architectures (e.g., Laplacian pyramid). These approaches can help manage memory constraints while preserving important features in the input images for successful processing by the VAE.\ + \ + In summary, resizing the original image before feeding it into a VAE can be an effective solution to avoid runtime errors due to memory limitations. By finding an appropriate balance between image quality and resource usage, you can optimize the model's performance while managing memory constraints.\ +(Tokens used: 5026 / 6144)\ +> What is a 'Laplacean pyramid' resizing algorithm?\ + A Laplacian pyramid is a multi-scale image representation technique used for image processing, compression, and analysis tasks. It involves decomposing an input image into multiple scales or levels of resolution, where each level represents a reduced-resolution version of the original image. The decomposition process captures both coarse and fine details in the image while allowing for efficient processing and manipulation at different scales.\ + \ + In a Laplacian pyramid, the input image is first low-pass filtered to remove high-frequency components, resulting in a smoothed version of the original image. This smoothed image is then subsampled (typically by a factor of 2) to obtain a reduced-resolution version of the input image. The difference between the original and the smoothed images (i.e., the high-frequency components) is stored separately as a band-pass image at this scale.\ + \ + This process is repeated for multiple levels, creating a pyramid of increasingly coarser and smaller images along with their corresponding band-pass images. The base of the pyramid represents the lowest resolution version of the input image, while the top level contains the highest frequency components.\ + \ + To reconstruct the original image from the Laplacian pyramid, the band-pass images are upsampled and added to their corresponding coarse images in a reverse order. This process progressively refines the resolution and adds high-frequency details back to the image, resulting in a reconstruction that closely resembles the original input.\ + \ + In the context of resizing an image for a VAE, using a Laplacian pyramid can help manage memory constraints by allowing the model to process the input image at different scales. By applying the VAE at each scale and then reconstructing the final output from the resulting latent spaces, you can efficiently handle large input images without exceeding memory limitations while preserving important details and structures in the original data.\ +(Tokens used: 5473 / 6144)\ +> I think the PIL and pillow packages can do this.\ + Yes, you are correct! The Python Imaging Library (PIL) and its fork, Pillow, provide various image processing functionalities, including resizing images using Laplacian pyramid-based techniques.\ + \ + In Pillow, the `Pyramid` class can be used to create a Laplacian pyramid representation of an input image. The `Pyramid` class takes an instance of the `Image` class as its input and creates a multi-scale representation using Gaussian and Laplacian pyramids.\ + \ + Here's an example of how you can create a Laplacian pyramid for an input image using Pillow:\ +```python\ +from PIL import Image, Pyramid\ +\ +# Load the input image\ +input_image = Image.open('input_image.jpg')\ +\ +# Create the Gaussian and Laplacian pyramids\ +pyramid = Pyramid(input_image)\ +gaussian_pyramid = pyramid.gaussian_blurred\ +laplacian_pyramid = pyramid.bandpass_filtered\ +\ +# Access individual levels of the Laplacian pyramid\ +level0 = laplacian_pyramid[0] # The highest resolution level\ +level1 = laplacian_pyramid[1]\ +# ... and so on for other levels\ +```\ +Once you have created the Laplacian pyramid representation of the input image, you can process each level separately using your VAE or any other image processing technique. This can help manage memory constraints and preserve important details in large images.\ +\ +In summary, PIL and Pillow provide functionalities to create a Laplacian pyramid for an input image, allowing you to efficiently handle large images while preserving important structures and details.\ +(Tokens used: 5905 / 6144)\ +> Is it possible to view each level?\ + Yes, you can view each level of the Laplacian pyramid once you have created it using Pillow. Each level in the `laplacian_pyramid` list represents a band-pass image at a specific scale. The first level (`laplacian_pyramid[0]`) contains the highest resolution version, while subsequent levels contain progressively coarser and smaller versions of the input image.\ +\ +To view each level as an image, you can convert them back to PIL `Image` instances using the `fromarray()` method from the `PIL.Image` module. Here's an example:\ +```python\ +import numpy as np\ +from PIL import Image\ +\ +# Convert a specific level (e.g., level1) to a PIL Image instance\ +level1_image = Image.fromarray(np.uint8(laplacian_pyramid[1]))\ +\ +# Display the image\ +level1_image.show()\ +\ +\ +main: context full and n_predict == --2 => stopping\ +\ +llama_print_timings: load time = 22247.05 ms\ +llama_print_timings: sample time = 7356.51 ms / 4941 runs ( 1.49 ms per token, 671.65 tokens per second)\ +llama_print_timings: prompt eval time = 29625.92 ms / 1204 tokens ( 24.61 ms per token, 40.64 tokens per second)\ +llama_print_timings: eval time = 327748.53 ms / 4940 runs ( 66.35 ms per token, 15.07 tokens per second)\ +llama_print_timings: total time = 2531480.69 ms / 6144 tokens\ +ggml_metal_free: deallocating\ +Log end} \ No newline at end of file diff --git a/MixtralonLatentSpace.rtf b/MixtralonLatentSpace.rtf new file mode 100644 index 000000000..d5df1a3ca --- /dev/null +++ b/MixtralonLatentSpace.rtf @@ -0,0 +1,85 @@ +{\rtf1\ansi\ansicpg1252\cocoartf2759 +\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +{\*\expandedcolortbl;;} +\paperw11900\paperh16840\margl1440\margr1440\vieww11520\viewh8400\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\pardirnatural\partightenfactor0 + +\f0\fs24 \cf0 Mixtral on Episode 11_48\ +\ +== Running in interactive mode. ==\ + - Press Ctrl+C to interject at any time.\ + - Press Return to return control to LLaMa.\ + - To return control without starting a new line, end your input with '/'.\ + - If you want to submit another line, end your input with '\\'.\ +\ + For some reason, just after I recorded episode 47 of this series, series 11, I started thinking what the parallels are between image-to-image transformations and text-to-text transformations. And I had the following thought. Suppose you've got a model that has been trained on images and you give it an image that is, say, a thousand by a thousand. So it has a million pixels or a million bytes or whatever it is to start with. It progressively adds noises, noise to them blurs so that it generates a latent space. And then it picks a point in the latent space and recreate something that is at least related to the original image, even if, when you look at it, any similarities are very hard to spot. All right, we've talked about that and you can go back and listen to previous episodes. But suppose instead, we do the same thing. Well, I was going to say it with text, but something slightly more than text. Suppose, if we had the computer power, we could take what I suppose you could regard as the image or an image of human history. And it wouldn't just be a thousand by a thousand. It would be hundreds of millions by hundreds of millions. But it would, in however many dimensions it needed to exist, represent the sum of human knowledge, human experience, human culture, and understanding of the universe. And some wag on Twitter the other day said that if you can fit everything we know into about 50 gigabytes on Wikipedia, then that's about the totality of human achievement. Well, that's a bit silly. But just suppose you could do something like that and create a corpus that represented the sum of human knowledge, experience, wisdom, understanding, the whole shooting match. And suppose for the sake of argument that it was 100 gigabytes. Now, if we regard that as an image in the same vein as an image to image transformation, let's suppose that we now, I'm assuming we have the computing resources to do this. So let's not put a practical considerations in the way. Suppose you take that 100 gigabytes and we noise it in just the way we do with an image that we can see. We noise it and we get a latent space. And it might need to be hundreds, thousands, or even hundreds of thousands of steps into the space in order to get something that we could do something with. And then that latent space, which would have an unconscionable number of points in it, but each of them would be a descendant of the original 100 gigabyte image. So we take one and we re noise it. In other words, we try to go back to something that is certainly descended from, but fundamentally and noticeably and identifiably, recognizably different from the original 100 gigabytes. And we do that according to some, I don't know, some idea, some prompt, some text prompt, some wish to make things different. So supposing we have the computer resources to do that, we might end up with a very different model for human achievement, for human history, for human culture, for everything that there is that we know about. We might end up with something quite different from the 100 gigabytes that we started with, and something which had moreover been redesigned, according to this perhaps very substantial text prompt that we had used to reconfigure it. So to use the metaphor again, it's like going into the underworld, you go right down until you find yourself in this inchoate, incomprehensible, unrecognizable latent space, where there are all these points, each of them a child of the world as we know it. And then you pick one and you reconstitute it to get to a different world from the one we know. But according to certain criteria, that one might call ideological or philosophical or heaven for a friend, even metaphysical or religious. But some set of attributes where we could say, let's reconfigure, let's re imagine, let's repurpose the whole of human history, according to some new scheme. And we end up with an image. And of course, we can end up with many images, we might not just be one, there might be a lot. That I think is what this whole series has been about, because the unmaking of sense that takes us down into the depths, into the latent space is the adding of noise to a world picture that we think is very comfortable, and indeed we think is right, which is the image that we start with. But it's no good just stopping with the unmaking. How do we remake? How do we come back? And what do we end up with if we try? And is it better? Well, it will certainly be different, whether it's better remains to be seen. But just as a final footnote for what's going to be a very big but very short episode here, this episode 48, the world just can't escape thinking of things in terms of violence. Almost every Hollywood movie you ever watch has a first half, which is quite interesting. And then it degenerates into some kind of war game, or battle plan, or even usually, frankly, fisticuffs or a sword fight. Because that's the only normative way we seem to imagine that we can settle our differences. And we've just seen the Americans and the UK launch attacks on the on the hooty hooty in Yemen. We've seen Israel launching attacks on Gaza and Hamas launching attacks on Israel. We've seen Russia endlessly attacking Ukraine and so on. Why is that the only narrative? Why is it that we can't think our way out of that? As indeed, I thought we had started to do before Putin launched on his excitable war. Why can't we think differently? Why can't we unmake sense and remake sense in a way that renders war and bloodshed and destruction unnecessary? Well, of course, if you're British, there's a very good reason because we make an enormous amount of money out of selling armaments that allow other people to kill themselves. But to come back to my main point, because it's far more important, if we imagine image to image as being like the transformation of a worldview, then the unmaking of one image, the disassembly, the going down into the depths to find a point from which we can start again is the beginning of the remaking of sense that the world needs. Thank you for listening. [BLANK_AUDIO](Tokens used: 1024 / 7168)\ +> Please summarise the argument in 10 bullet points.\ + Here are ten key points that summarize the argument presented in this episode:\ +\ +1. The speaker compares image-to-image transformations with text-to-text transformations, using the metaphor of an image representing human history and culture as opposed to a traditional image with pixels.\ +2. They suggest that if we could represent human knowledge and experience as a 100 gigabyte "image," it would be possible to create a latent space by adding noise and then reconfigure this latent space into new images based on certain criteria or prompts.\ +3. The speaker proposes the idea of unmaking sense, which involves going down into the latent space and finding points from which we can start again, allowing for the remaking of a new image or worldview.\ +4. They suggest that this process could potentially lead to a different model for human achievement, history, culture, and understanding of the universe.\ +5. The speaker questions why war and violence are often seen as the only ways to settle differences, and they wonder whether it's possible to think differently and find alternatives to these destructive narratives.\ +6. They criticize the UK and US for selling arms that contribute to wars and conflicts around the world, highlighting the financial incentives behind perpetuating violence.\ +7. The speaker argues that unmaking sense and remaking it in a new way could potentially render war and bloodshed unnecessary.\ +8. They suggest that if we can imagine image-to-image transformations as being like the transformation of a worldview, then the unmaking of one image, or disassembly, is the beginning of the remaking of sense that the world needs.\ +9. The speaker emphasizes the importance of thinking differently and finding alternative ways to approach conflicts and differences in order to create a more peaceful and harmonious world.\ +10. They invite listeners to reflect on these ideas and consider how they might contribute to the unmaking and remaking of sense in their own lives and communities.\ +(Tokens used: 1888 / 7168)\ +> What do you make of the argument? Positively and negatively.\ + Here are some positive and negative aspects to consider regarding the argument presented in this episode:\ +\ +Positive Aspects:\ +\ +1. The speaker's metaphorical approach to imagining human knowledge, culture, and experience as an image is thought-provoking and encourages listeners to think differently about these concepts.\ +2. The idea of unmaking sense and remaking it in a new way offers a fresh perspective on how to approach conflicts and differences in the world.\ +3. The speaker raises important questions about why war and violence are often seen as the only ways to settle disputes, inviting listeners to consider alternative narratives that could lead to more peaceful solutions.\ +4. The invitation to reflect on these ideas and consider how they might contribute to the unmaking and remaking of sense in one's own life and community is a valuable exercise for personal growth and social awareness.\ +\ +Negative Aspects:\ +\ +1. The speaker's argument may be difficult to follow at times due to the abstract nature of their metaphors and ideas, making it challenging for some listeners to fully grasp the main points.\ +2. The comparison between image-to-image transformations and text-to-text transformations is not entirely clear or well-explained, potentially leaving some listeners confused about the connection between the two concepts.\ +3. While the speaker raises important questions about war and violence, they do not offer any concrete solutions or alternatives to these problems, making their argument feel somewhat incomplete.\ +4. The suggestion that we can simply reconfigure human history and culture according to certain criteria may oversimplify the complex nature of these concepts, which are shaped by countless factors over time.\ +(Tokens used: 2270 / 7168)\ +> As to your negative point 3 I think the reply would be that, just as the nature of a denoised image does not become clear until the denoising has been done (and cannot be known in advance even by the AI doing the denoising), so the detail of the kind of remaking of sense can only emerge at such time as the process has been done.\ + Thank you for your feedback on negative point 3. I understand your perspective that the specifics of a remade worldview cannot be fully anticipated before the process has taken place, much like how the details of a denoised image only become clear after the denoising has been completed. This is a valid observation, and it highlights the importance of engaging in such processes with an open mind, allowing for new perspectives and possibilities to emerge through the unmaking and remaking of sense. However, it's still essential to consider potential consequences, ethical implications, and alternative solutions when proposing significant changes to existing worldviews or societal structures, even if these outcomes cannot be fully predicted in advance.\ +(Tokens used: 2506 / 7168)\ +> I think the method used by a variational autoencoder in a transformer model to decode a point in latent space is a better analogy: we often just can't tell where we are going or what the potential consequences are until we are involved in the process. This is what Luther dubbed 'solvitur ambulando' or 'sort it out as you go along'.\ + Thank you for your feedback on using a variational autoencoder in a transformer model as an analogy for the unmaking and remaking of sense. I appreciate your reference to Luther's concept of "solvitur ambulando," which emphasizes learning by doing and figuring things out as one goes along. This is indeed a valuable perspective, as it highlights the importance of engaging in processes with a willingness to adapt and learn from emerging outcomes, rather than attempting to predict or control every aspect of the transformation. Using this analogy can help illustrate how the unmaking and remaking of sense may involve a degree of experimentation, exploration, and flexibility in order to navigate the complexities and uncertainties that arise during the process.\ +(Tokens used: 2756 / 7168)\ +> \ +(Tokens used: 2757 / 7168)\ +> I think it is nevertheless important, as you remark, to give some outlines of the direction of travel rather as a text-prompt guides the denoising of an image and gives the VAE a reference-point by which to assess loss and reconsider progress.\ + Thank you for your valuable feedback on the importance of providing some outlines or guidelines for the direction of travel when engaging in the unmaking and remaking of sense, similar to how a text prompt guides the denoising process in a variational autoencoder. Providing such reference points can indeed help establish boundaries, outline intentions, and ensure that the transformation aligns with certain values or principles. These guidelines may not dictate every aspect of the outcome but rather offer a framework for assessing progress and reconsidering the direction as needed throughout the process. By combining this structured approach with the flexibility and adaptability encouraged by the "solvitur ambulando" perspective, it becomes possible to navigate the unmaking and remaking of sense in a more mindful and intentional way, while remaining open to new possibilities and emerging outcomes.\ +(Tokens used: 2998 / 7168)\ +> The problem is that, as the Ukraine-Russia war tragically illustrates, non-violent means of controlling aberrant behaviour are hard to make stick. Economic, intellectual and cultural isolation and sanctions can have little impact on a country determined to be an aggressor no matter what the consequences.\ + Thank you for raising the important point about the challenges in implementing non-violent means of controlling aberrant behavior, as demonstrated by the ongoing Ukraine-Russia conflict. It is indeed true that economic, intellectual, and cultural isolation and sanctions may not always be effective in deterring a determined aggressor. While such measures can have significant impacts on a country's economy and international standing, they may not necessarily prevent acts of violence or aggression when the motivations for such actions are deeply entrenched. In cases like these, it becomes essential to explore alternative strategies that address the root causes of the conflict and consider multilateral approaches that involve diplomacy, dialogue, and cooperation among various stakeholders. While finding effective non-violent solutions may be challenging, it remains crucial to continue seeking innovative and collaborative ways to manage conflicts and promote peacebuilding efforts.\ +(Tokens used: 3253 / 7168)\ +> Another severe problem is that much of the economy of the world seems to depend upon military equipment being sold by advanced technological countries to less advanced, so there is an economy of violence now as there has always been since the Stone Age when whoever had the biggest stone axe could tell others what to do.\ + Thank you for raising another important issue related to the global economy's dependence on military equipment sales by technologically advanced countries to less advanced ones. Indeed, this economic relationship has created a system that perpetuates violence and conflict, as well as reinforces power imbalances between nations. The historical precedent of using force to assert dominance over others, as exemplified by the biggest stone axe in prehistoric times, highlights the deeply entrenched nature of this problem. To address these challenges, it is crucial to promote a shift towards more peaceful and sustainable economies that prioritize cooperation, diplomacy, and conflict prevention over military intervention and aggression. This may involve rethinking global economic structures, addressing the root causes of conflicts, and fostering a culture that values non-violent means of resolving disputes. By working together to create an environment where peaceful coexistence is prioritized over military dominance, it becomes possible to build a more just and equitable world for all.\ +(Tokens used: 3537 / 7168)\ +> I notice that whenever I make a statement or an observation rather than ask a question your first response is to repeat what I have said. This is not necessary. It would be preferable to go straight to your response, even if it is not approving, because both positive and negative feedback are welcome.\ + Thank you for providing feedback on my responses and pointing out that I tend to repeat your statements before offering my thoughts. I appreciate your openness to both positive and negative feedback, and I will make a conscious effort to improve my response style by going straight to my perspective or ideas without repeating your statements verbatim. Your input is valuable, and I want to ensure that our conversation remains engaging, respectful, and productive for both of us.\ +(Tokens used: 3704 / 7168)\ +> Of course the whole latent space discussion is predicated on the computational cost of anticipation and prediction: we can deal with one object, sentence or image, but trying to conceptualise the whole latent space they might intimate is far more difficult.\ + Thank you for your insightful comment about how the idea of exploring the entire latent space is predicated on the computational cost of anticipation and prediction. Indeed, it is challenging to conceptualize or explore an entire latent space due to the vast number of potential combinations and possibilities that may exist within it. While the variational autoencoder (VAE) can denoise a single point in latent space based on a text prompt, attempting to map out or understand the entire space would require immense computational resources and advanced algorithms. Nonetheless, engaging with smaller portions of the latent space can still provide valuable insights into the transformation process and help us better understand how unmaking and remaking sense might occur within specific contexts. By focusing on more manageable sections of the latent space or specific instances of denoising, it becomes possible to explore the dynamics of these transformations without being overwhelmed by the complexity of the entire space.\ +(Tokens used: 3963 / 7168)\ +> It's an example of a process that only makes obvious sense once it has been completed. We trust the noising and denoising process will take us into and out of the latent space in a way that preserves at least some of the characteristics of the original image or text, but until we see the final result it is impossible to know, and even then it is difficult to be sure how much of the original image has been retained or how much it has influenced the eventual product.\ + Thank you for your thoughtful comment on the noising and denoising process within a latent space. You accurately point out that the trust in this process lies in the assumption that the essential characteristics of the original image or text will be preserved to some extent, but it is only upon seeing the final result that we can begin to assess how much of the original has been retained or influenced the eventual product. This observation highlights the inherent uncertainty and complexity involved in transforming data within a latent space, as well as the importance of carefully considering the potential impacts and consequences of such processes. By engaging with these challenges and uncertainties, we can develop a more nuanced understanding of how unmaking and remaking sense may occur in various contexts, while remaining mindful of the limitations and potential risks associated with these transformations.\ +(Tokens used: 4250 / 7168)\ +> Controversies in art and literature frequently arise when an artist or author takes a bigger step away from accepted norms than their public can comprehend or, sometimes, 'stomach'; this I think is illustrative of 'strength' in diffusion models that governs how adventurous we are.\ + Thank you for drawing an interesting connection between controversies in art and literature and the "strength" parameter in diffusion models, which may govern the level of adventure or deviation from accepted norms. In both cases, taking a significant step away from established conventions can challenge audiences and potentially lead to controversy, as people struggle to comprehend or accept these new perspectives.\ +\ +In art and literature, pushing boundaries often leads to groundbreaking innovations and fresh insights that expand our understanding of the world and ourselves. Similarly, in diffusion models, adjusting the "strength" parameter can result in more adventurous transformations within a latent space, allowing for greater exploration and discovery. However, just as with artistic or literary controversies, there is a risk that such transformations may be difficult for some to comprehend or accept, leading to resistance or criticism.\ +\ +By carefully considering the potential impacts and consequences of these transformations, we can foster an environment where innovation and creativity are valued while remaining mindful of the limitations and potential risks associated with venturing too far from established norms. This balance between exploration and acceptance can help drive progress in both artistic expression and diffusion models, ultimately contributing to a more diverse and rich understanding of our world.\ +(Tokens used: 4583 / 7168)\ +> I think artists and authors often observe the maxim 'it is easier to ask for forgiveness than permission' and just do what they want to do.\ + Thank you for your observation on how artists and authors often follow the maxim "it is easier to ask for forgiveness than permission" when pursuing their creative endeavors. This mindset reflects a willingness to take risks, push boundaries, and challenge established norms in order to create innovative and impactful works. By prioritizing action over seeking approval, artists and authors can contribute to the richness of our cultural landscape and drive progress in various artistic fields.\ +\ +However, it is important to note that this approach may not always be appropriate or feasible, especially when considering ethical implications, potential harm, or legal consequences. In such cases, seeking permission and engaging in open dialogue with stakeholders can help ensure that creative projects align with shared values and respect the rights and needs of all involved parties. By finding a balance between taking risks and being mindful of potential impacts, artists and authors can contribute to a vibrant cultural scene while fostering positive relationships and minimizing negative consequences.\ +(Tokens used: 4829 / 7168)} \ No newline at end of file diff --git a/Package.swift b/Package.swift index 37524edee..b24c9204a 100644 --- a/Package.swift +++ b/Package.swift @@ -13,17 +13,31 @@ let package = Package( products: [ .library(name: "llama", targets: ["llama"]), ], - dependencies: [ - .package(url: "https://github.com/ggerganov/ggml.git", .branch("release")) - ], targets: [ .target( name: "llama", - dependencies: ["ggml"], path: ".", - exclude: ["ggml-metal.metal"], + exclude: [ + "cmake", + "examples", + "scripts", + "models", + "tests", + "CMakeLists.txt", + "ggml-cuda.cu", + "ggml-cuda.h", + "Makefile" + ], sources: [ + "ggml.c", "llama.cpp", + "ggml-alloc.c", + "ggml-backend.c", + "ggml-quants.c", + "ggml-metal.m", + ], + resources: [ + .process("ggml-metal.metal") ], publicHeadersPath: "spm-headers", cSettings: [ diff --git a/PortTest.ipynb b/PortTest.ipynb new file mode 100644 index 000000000..5b3c231c8 --- /dev/null +++ b/PortTest.ipynb @@ -0,0 +1,2 @@ +for i in range(10): + print(i, i**2) diff --git a/README-sycl.md b/README-sycl.md new file mode 100644 index 000000000..e3a8e726e --- /dev/null +++ b/README-sycl.md @@ -0,0 +1,494 @@ +# llama.cpp for SYCL + +- [Background](#background) +- [OS](#os) +- [Intel GPU](#intel-gpu) +- [Docker](#docker) +- [Linux](#linux) +- [Windows](#windows) +- [Environment Variable](#environment-variable) +- [Known Issue](#known-issue) +- [Q&A](#q&a) +- [Todo](#todo) + +## Background + +SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators—such as CPUs, GPUs, and FPGAs. It is a single-source embedded domain-specific language based on pure C++17. + +oneAPI is a specification that is open and standards-based, supporting multiple architecture types including but not limited to GPU, CPU, and FPGA. The spec has both direct programming and API-based programming paradigms. + +Intel uses the SYCL as direct programming language to support CPU, GPUs and FPGAs. + +To avoid to re-invent the wheel, this code refer other code paths in llama.cpp (like OpenBLAS, cuBLAS, CLBlast). We use a open-source tool [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) (Commercial release [Intel® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) migrate to SYCL. + +The llama.cpp for SYCL is used to support Intel GPUs. + +For Intel CPU, recommend to use llama.cpp for X86 (Intel MKL building). + +## OS + +|OS|Status|Verified| +|-|-|-| +|Linux|Support|Ubuntu 22.04, Fedora Silverblue 39| +|Windows|Support|Windows 11| + + +## Intel GPU + +### Verified + +|Intel GPU| Status | Verified Model| +|-|-|-| +|Intel Data Center Max Series| Support| Max 1550| +|Intel Data Center Flex Series| Support| Flex 170| +|Intel Arc Series| Support| Arc 770, 730M| +|Intel built-in Arc GPU| Support| built-in Arc GPU in Meteor Lake| +|Intel iGPU| Support| iGPU in i5-1250P, i7-1260P, i7-1165G7| + +Note: If the EUs (Execution Unit) in iGPU is less than 80, the inference speed will be too slow to use. + +### Memory + +The memory is a limitation to run LLM on GPUs. + +When run llama.cpp, there is print log to show the applied memory on GPU. You could know how much memory to be used in your case. Like `llm_load_tensors: buffer size = 3577.56 MiB`. + +For iGPU, please make sure the shared memory from host memory is enough. For llama-2-7b.Q4_0, recommend the host memory is 8GB+. + +For dGPU, please make sure the device memory is enough. For llama-2-7b.Q4_0, recommend the device memory is 4GB+. + +## Docker + +Note: +- Only docker on Linux is tested. Docker on WSL may not work. +- You may need to install Intel GPU driver on the host machine (See the [Linux](#linux) section to know how to do that) + +### Build the image + +You can choose between **F16** and **F32** build. F16 is faster for long-prompt inference. + + +```sh +# For F16: +#docker build -t llama-cpp-sycl --build-arg="LLAMA_SYCL_F16=ON" -f .devops/main-intel.Dockerfile . + +# Or, for F32: +docker build -t llama-cpp-sycl -f .devops/main-intel.Dockerfile . + +# Note: you can also use the ".devops/main-server.Dockerfile", which compiles the "server" example +``` + +### Run + +```sh +# Firstly, find all the DRI cards: +ls -la /dev/dri +# Then, pick the card that you want to use. + +# For example with "/dev/dri/card1" +docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-sycl -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 +``` + +## Linux + +### Setup Environment + +1. Install Intel GPU driver. + +a. Please install Intel GPU driver by official guide: [Install GPU Drivers](https://dgpu-docs.intel.com/driver/installation.html). + +Note: for iGPU, please install the client GPU driver. + +b. Add user to group: video, render. + +```sh +sudo usermod -aG render username +sudo usermod -aG video username +``` + +Note: re-login to enable it. + +c. Check + +```sh +sudo apt install clinfo +sudo clinfo -l +``` + +Output (example): + +``` +Platform #0: Intel(R) OpenCL Graphics + `-- Device #0: Intel(R) Arc(TM) A770 Graphics + + +Platform #0: Intel(R) OpenCL HD Graphics + `-- Device #0: Intel(R) Iris(R) Xe Graphics [0x9a49] +``` + +2. Install Intel® oneAPI Base toolkit. + +a. Please follow the procedure in [Get the Intel® oneAPI Base Toolkit ](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html). + +Recommend to install to default folder: **/opt/intel/oneapi**. + +Following guide use the default folder as example. If you use other folder, please modify the following guide info with your folder. + +b. Check + +```sh +source /opt/intel/oneapi/setvars.sh + +sycl-ls +``` + +There should be one or more level-zero devices. Please confirm that at least one GPU is present, like **[ext_oneapi_level_zero:gpu:0]**. + +Output (example): +``` +[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000] +[opencl:cpu:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i7-13700K OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000] +[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [23.30.26918.50] +[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26918] + +``` + +2. Build locally: + +Note: +- You can choose between **F16** and **F32** build. F16 is faster for long-prompt inference. +- By default, it will build for all binary files. It will take more time. To reduce the time, we recommend to build for **example/main** only. + +```sh +mkdir -p build +cd build +source /opt/intel/oneapi/setvars.sh + +# For FP16: +#cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON + +# Or, for FP32: +cmake .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx + +# Build example/main only +#cmake --build . --config Release --target main + +# Or, build all binary +cmake --build . --config Release -v + +cd .. +``` + +or + +```sh +./examples/sycl/build.sh +``` + +### Run + +1. Put model file to folder **models** + +You could download [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) as example. + +2. Enable oneAPI running environment + +``` +source /opt/intel/oneapi/setvars.sh +``` + +3. List device ID + +Run without parameter: + +```sh +./build/bin/ls-sycl-device + +# or running the "main" executable and look at the output log: + +./build/bin/main +``` + +Check the ID in startup log, like: + +``` +found 4 SYCL devices: + Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3, + max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136 + Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2, + max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280 + Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0, + max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280 + Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0, + max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136 + +``` + +|Attribute|Note| +|-|-| +|compute capability 1.3|Level-zero running time, recommended | +|compute capability 3.0|OpenCL running time, slower than level-zero in most cases| + +4. Set device ID and execute llama.cpp + +Set device ID = 0 by **GGML_SYCL_DEVICE=0** + +```sh +GGML_SYCL_DEVICE=0 ./build/bin/main -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 +``` +or run by script: + +```sh +./examples/sycl/run_llama2.sh +``` + +Note: + +- By default, mmap is used to read model file. In some cases, it leads to the hang issue. Recommend to use parameter **--no-mmap** to disable mmap() to skip this issue. + + +5. Check the device ID in output + +Like: +``` +Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device +``` + +## Windows + +### Setup Environment + +1. Install Intel GPU driver. + +Please install Intel GPU driver by official guide: [Install GPU Drivers](https://www.intel.com/content/www/us/en/products/docs/discrete-gpus/arc/software/drivers.html). + +Note: **The driver is mandatory for compute function**. + +2. Install Visual Studio. + +Please install [Visual Studio](https://visualstudio.microsoft.com/) which impact oneAPI environment enabling in Windows. + +3. Install Intel® oneAPI Base toolkit. + +a. Please follow the procedure in [Get the Intel® oneAPI Base Toolkit ](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html). + +Recommend to install to default folder: **/opt/intel/oneapi**. + +Following guide uses the default folder as example. If you use other folder, please modify the following guide info with your folder. + +b. Enable oneAPI running environment: + +- In Search, input 'oneAPI'. + +Search & open "Intel oneAPI command prompt for Intel 64 for Visual Studio 2022" + +- In Run: + +In CMD: +``` +"C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 +``` + +c. Check GPU + +In oneAPI command line: + +``` +sycl-ls +``` + +There should be one or more level-zero devices. Please confirm that at least one GPU is present, like **[ext_oneapi_level_zero:gpu:0]**. + +Output (example): +``` +[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000] +[opencl:cpu:1] Intel(R) OpenCL, 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000] +[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Iris(R) Xe Graphics OpenCL 3.0 NEO [31.0.101.5186] +[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Iris(R) Xe Graphics 1.3 [1.3.28044] +``` + +4. Install cmake & make + +a. Download & install cmake for Windows: https://cmake.org/download/ + +b. Download & install mingw-w64 make for Windows provided by w64devkit + +- Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases). + +- Extract `w64devkit` on your pc. + +- Add the **bin** folder path in the Windows system PATH environment, like `C:\xxx\w64devkit\bin\`. + +### Build locally: + +In oneAPI command line window: + +``` +mkdir -p build +cd build +@call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force + +:: for FP16 +:: faster for long-prompt inference +:: cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release -DLLAMA_SYCL_F16=ON + +:: for FP32 +cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release + + +:: build example/main only +:: make main + +:: build all binary +make -j +cd .. +``` + +or + +``` +.\examples\sycl\win-build-sycl.bat +``` + +Note: + +- By default, it will build for all binary files. It will take more time. To reduce the time, we recommend to build for **example/main** only. + +### Run + +1. Put model file to folder **models** + +You could download [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) as example. + +2. Enable oneAPI running environment + +- In Search, input 'oneAPI'. + +Search & open "Intel oneAPI command prompt for Intel 64 for Visual Studio 2022" + +- In Run: + +In CMD: +``` +"C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 +``` + +3. List device ID + +Run without parameter: + +``` +build\bin\ls-sycl-device.exe + +or + +build\bin\main.exe +``` + +Check the ID in startup log, like: + +``` +found 4 SYCL devices: + Device 0: Intel(R) Arc(TM) A770 Graphics, compute capability 1.3, + max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136 + Device 1: Intel(R) FPGA Emulation Device, compute capability 1.2, + max compute_units 24, max work group size 67108864, max sub group size 64, global mem size 67065057280 + Device 2: 13th Gen Intel(R) Core(TM) i7-13700K, compute capability 3.0, + max compute_units 24, max work group size 8192, max sub group size 64, global mem size 67065057280 + Device 3: Intel(R) Arc(TM) A770 Graphics, compute capability 3.0, + max compute_units 512, max work group size 1024, max sub group size 32, global mem size 16225243136 + +``` + +|Attribute|Note| +|-|-| +|compute capability 1.3|Level-zero running time, recommended | +|compute capability 3.0|OpenCL running time, slower than level-zero in most cases| + +4. Set device ID and execute llama.cpp + +Set device ID = 0 by **set GGML_SYCL_DEVICE=0** + +``` +set GGML_SYCL_DEVICE=0 +build\bin\main.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 +``` +or run by script: + +``` +.\examples\sycl\win-run-llama2.bat +``` + +Note: + +- By default, mmap is used to read model file. In some cases, it leads to the hang issue. Recommend to use parameter **--no-mmap** to disable mmap() to skip this issue. + + +5. Check the device ID in output + +Like: +``` +Using device **0** (Intel(R) Arc(TM) A770 Graphics) as main device +``` + +## Environment Variable + +#### Build + +|Name|Value|Function| +|-|-|-| +|LLAMA_SYCL|ON (mandatory)|Enable build with SYCL code path.
For FP32/FP16, LLAMA_SYCL=ON is mandatory.| +|LLAMA_SYCL_F16|ON (optional)|Enable FP16 build with SYCL code path. Faster for long-prompt inference.
For FP32, not set it.| +|CMAKE_C_COMPILER|icx|Use icx compiler for SYCL code path| +|CMAKE_CXX_COMPILER|icpx (Linux), icx (Windows)|use icpx/icx for SYCL code path| + +#### Running + + +|Name|Value|Function| +|-|-|-| +|GGML_SYCL_DEVICE|0 (default) or 1|Set the device id used. Check the device ids by default running output| +|GGML_SYCL_DEBUG|0 (default) or 1|Enable log function by macro: GGML_SYCL_DEBUG| + +## Known Issue + +- Hang during startup + + llama.cpp use mmap as default way to read model file and copy to GPU. In some system, memcpy will be abnormal and block. + + Solution: add **--no-mmap** or **--mmap 0**. + +## Q&A + +- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`. + + Miss to enable oneAPI running environment. + + Install oneAPI base toolkit and enable it by: `source /opt/intel/oneapi/setvars.sh`. + +- In Windows, no result, not error. + + Miss to enable oneAPI running environment. + +- Meet compile error. + + Remove folder **build** and try again. + +- I can **not** see **[ext_oneapi_level_zero:gpu:0]** afer install GPU driver in Linux. + + Please run **sudo sycl-ls**. + + If you see it in result, please add video/render group to your ID: + + ``` + sudo usermod -aG render username + sudo usermod -aG video username + ``` + + Then **relogin**. + + If you do not see it, please check the installation GPU steps again. + +## Todo + +- Support multiple cards. diff --git a/README.md b/README.md index 866aa87b4..0c4ee5a27 100644 --- a/README.md +++ b/README.md @@ -6,15 +6,17 @@ [Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml) -Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++ +Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) in pure C/C++ ### Hot topics +- Remove LLAMA_MAX_DEVICES and LLAMA_SUPPORTS_GPU_OFFLOAD: https://github.com/ggerganov/llama.cpp/pull/5240 +- Incoming backends: https://github.com/ggerganov/llama.cpp/discussions/5138 + - [SYCL backend](README-sycl.md) is ready (1/28/2024), support Linux/Windows in Intel GPUs (iGPU, Arc/Flex/Max series) - New SOTA quantized models, including pure 2-bits: https://huggingface.co/ikawrakow - Collecting Apple Silicon performance stats: - M-series: https://github.com/ggerganov/llama.cpp/discussions/4167 - A-series: https://github.com/ggerganov/llama.cpp/discussions/4508 -- Added Mixtral support: https://github.com/ggerganov/llama.cpp/pull/4406 - Looking for contributions to improve and maintain the `server` example: https://github.com/ggerganov/llama.cpp/issues/4216 ---- @@ -31,17 +33,14 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
  • Get the Code
  • Build
  • BLAS Build
  • -
  • Prepare Data & Run
  • +
  • Prepare and Quantize
  • +
  • Run the quantized model
  • Memory/Disk Requirements
  • Quantization
  • Interactive mode
  • Constrained output with grammars
  • -
  • Instruction mode with Alpaca
  • -
  • Using OpenLLaMA
  • -
  • Using GPT4All
  • -
  • Using Pygmalion 7B & Metharme 7B
  • -
  • Obtaining the Facebook LLaMA original model and Stanford Alpaca model data
  • -
  • Verifying the model files
  • +
  • Instruct mode
  • +
  • Obtaining and using the Facebook LLaMA 2 model
  • Seminal papers and background on the models
  • Perplexity (measuring model quality)
  • Android
  • @@ -56,18 +55,20 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++ ## Description -The main goal of `llama.cpp` is to run the LLaMA model using 4-bit integer quantization on a MacBook +The main goal of `llama.cpp` is to enable LLM inference with minimal setup and state-of-the-art performance on a wide +variety of hardware - locally and in the cloud. -- Plain C/C++ implementation without dependencies -- Apple silicon first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks +- Plain C/C++ implementation without any dependencies +- Apple silicon is a first-class citizen - optimized via ARM NEON, Accelerate and Metal frameworks - AVX, AVX2 and AVX512 support for x86 architectures -- Mixed F16 / F32 precision -- 2-bit, 3-bit, 4-bit, 5-bit, 6-bit and 8-bit integer quantization support -- CUDA, Metal and OpenCL GPU backend support +- 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory use +- Custom CUDA kernels for running LLMs on NVIDIA GPUs (support for AMD GPUs via HIP) +- Vulkan, SYCL, and (partial) OpenCL backend support +- CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity -The original implementation of `llama.cpp` was [hacked in an evening](https://github.com/ggerganov/llama.cpp/issues/33#issuecomment-1465108022). -Since then, the project has improved significantly thanks to many contributions. This project is mainly for educational purposes and serves -as the main playground for developing new features for the [ggml](https://github.com/ggerganov/ggml) library. +Since its [inception](https://github.com/ggerganov/llama.cpp/issues/33#issuecomment-1465108022), the project has +improved significantly thanks to many contributions. It is the main playground for developing new features for the +[ggml](https://github.com/ggerganov/ggml) library. **Supported platforms:** @@ -75,43 +76,46 @@ as the main playground for developing new features for the [ggml](https://github - [X] Linux - [X] Windows (via CMake) - [X] Docker +- [X] FreeBSD **Supported models:** +Typically finetunes of the base models below are supported as well. + - [X] LLaMA 🦙 - [x] LLaMA 2 🦙🦙 +- [X] [Mistral 7B](https://huggingface.co/mistralai/Mistral-7B-v0.1) +- [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral) - [X] Falcon -- [X] [Alpaca](https://github.com/ggerganov/llama.cpp#instruction-mode-with-alpaca) -- [X] [GPT4All](https://github.com/ggerganov/llama.cpp#using-gpt4all) - [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) and [Chinese LLaMA-2 / Alpaca-2](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2) - [X] [Vigogne (French)](https://github.com/bofenghuang/vigogne) -- [X] [Vicuna](https://github.com/ggerganov/llama.cpp/discussions/643#discussioncomment-5533894) - [X] [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/) -- [X] [OpenBuddy 🐶 (Multilingual)](https://github.com/OpenBuddy/OpenBuddy) -- [X] [Pygmalion/Metharme](#using-pygmalion-7b--metharme-7b) -- [X] [WizardLM](https://github.com/nlpxucan/WizardLM) - [X] [Baichuan 1 & 2](https://huggingface.co/models?search=baichuan-inc/Baichuan) + [derivations](https://huggingface.co/hiyouga/baichuan-7b-sft) - [X] [Aquila 1 & 2](https://huggingface.co/models?search=BAAI/Aquila) - [X] [Starcoder models](https://github.com/ggerganov/llama.cpp/pull/3187) -- [X] [Mistral AI v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) - [X] [Refact](https://huggingface.co/smallcloudai/Refact-1_6B-fim) - [X] [Persimmon 8B](https://github.com/ggerganov/llama.cpp/pull/3410) - [X] [MPT](https://github.com/ggerganov/llama.cpp/pull/3417) - [X] [Bloom](https://github.com/ggerganov/llama.cpp/pull/3553) - [x] [Yi models](https://huggingface.co/models?search=01-ai/Yi) -- [X] [StableLM-3b-4e1t](https://github.com/ggerganov/llama.cpp/pull/3586) +- [X] [StableLM models](https://huggingface.co/stabilityai) - [x] [Deepseek models](https://huggingface.co/models?search=deepseek-ai/deepseek) - [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen) -- [x] [Mixtral MoE](https://huggingface.co/models?search=mistral-ai/Mixtral) - [x] [PLaMo-13B](https://github.com/ggerganov/llama.cpp/pull/3557) +- [x] [Phi models](https://huggingface.co/models?search=microsoft/phi) - [x] [GPT-2](https://huggingface.co/gpt2) +- [x] [Orion 14B](https://github.com/ggerganov/llama.cpp/pull/5118) +- [x] [InternLM2](https://huggingface.co/models?search=internlm2) +- [x] [CodeShell](https://github.com/WisdomShell/codeshell) **Multimodal models:** -- [x] [Llava 1.5 models](https://huggingface.co/collections/liuhaotian/llava-15-653aac15d994e992e2677a7e) -- [x] [Bakllava](https://huggingface.co/models?search=SkunkworksAI/Bakllava) +- [x] [LLaVA 1.5 models](https://huggingface.co/collections/liuhaotian/llava-15-653aac15d994e992e2677a7e) +- [x] [BakLLaVA](https://huggingface.co/models?search=SkunkworksAI/Bakllava) - [x] [Obsidian](https://huggingface.co/NousResearch/Obsidian-3B-V0.5) - [x] [ShareGPT4V](https://huggingface.co/models?search=Lin-Chen/ShareGPT4V) +- [x] [MobileVLM 1.7B/3B models](https://huggingface.co/models?search=mobileVLM) +- [x] [Yi-VL](https://huggingface.co/models?search=Yi-VL) **Bindings:** @@ -120,30 +124,44 @@ as the main playground for developing new features for the [ggml](https://github - Go: [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp) - Node.js: [withcatai/node-llama-cpp](https://github.com/withcatai/node-llama-cpp) - JS/TS (llama.cpp server client): [lgrammel/modelfusion](https://modelfusion.dev/integration/model-provider/llamacpp) +- JavaScript/Wasm (works in browser): [tangledgroup/llama-cpp-wasm](https://github.com/tangledgroup/llama-cpp-wasm) - Ruby: [yoshoku/llama_cpp.rb](https://github.com/yoshoku/llama_cpp.rb) -- Rust: [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp) +- Rust (nicer API): [mdrokz/rust-llama.cpp](https://github.com/mdrokz/rust-llama.cpp) +- Rust (more direct bindings): [utilityai/llama-cpp-rs](https://github.com/utilityai/llama-cpp-rs) - C#/.NET: [SciSharp/LLamaSharp](https://github.com/SciSharp/LLamaSharp) - Scala 3: [donderom/llm4s](https://github.com/donderom/llm4s) - Clojure: [phronmophobic/llama.clj](https://github.com/phronmophobic/llama.clj) - React Native: [mybigday/llama.rn](https://github.com/mybigday/llama.rn) - Java: [kherud/java-llama.cpp](https://github.com/kherud/java-llama.cpp) - Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig) +- Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart) **UI:** -- [nat/openplayground](https://github.com/nat/openplayground) -- [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui) -- [withcatai/catai](https://github.com/withcatai/catai) -- [semperai/amica](https://github.com/semperai/amica) -- [psugihara/FreeChat](https://github.com/psugihara/FreeChat) -- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal) +Unless otherwise noted these projects are open-source with permissive licensing: + - [iohub/collama](https://github.com/iohub/coLLaMA) +- [janhq/jan](https://github.com/janhq/jan) (AGPL) +- [nat/openplayground](https://github.com/nat/openplayground) +- [Faraday](https://faraday.dev/) (proprietary) +- [LMStudio](https://lmstudio.ai/) (proprietary) +- [LostRuins/koboldcpp](https://github.com/LostRuins/koboldcpp) (AGPL) +- [Mozilla-Ocho/llamafile](https://github.com/Mozilla-Ocho/llamafile) +- [nomic-ai/gpt4all](https://github.com/nomic-ai/gpt4all) +- [ollama/ollama](https://github.com/ollama/ollama) +- [oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui) (AGPL) +- [psugihara/FreeChat](https://github.com/psugihara/FreeChat) +- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT) +- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal) +- [pythops/tenere](https://github.com/pythops/tenere) (AGPL) +- [semperai/amica](https://github.com/semperai/amica) +- [withcatai/catai](https://github.com/withcatai/catai) --- Here is a typical run using LLaMA v2 13B on M2 Ultra: -```java +``` $ make -j && ./main -m models/llama-13b-v2/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e I llama.cpp build info: I UNAME_S: Darwin @@ -227,7 +245,7 @@ https://user-images.githubusercontent.com/1991296/224442907-7693d4be-acaa-4e01-8 ## Usage -Here are the end-to-end binary build and model conversion steps for the LLaMA-7B model. +Here are the end-to-end binary build and model conversion steps for most supported models. ### Get the Code @@ -288,7 +306,7 @@ In order to build llama.cpp you have three different options. sudo pkg install gmake automake autoconf pkgconf llvm15 clinfo clover \ opencl clblast openblas - gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4 + gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4 ``` **Notes:** With this packages you can build llama.cpp with OPENBLAS and @@ -388,28 +406,28 @@ Building the program with BLAS support may lead to some performance improvements Check [BLIS.md](docs/BLIS.md) for more information. +- #### SYCL + SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators. + + llama.cpp based on SYCL is used to **support Intel GPU** (Data Center Max series, Flex series, Arc series, Built-in GPU and iGPU). + + For detailed info, please refer to [llama.cpp for SYCL](README-sycl.md). + - #### Intel oneMKL + Building through oneAPI compilers will make avx_vnni instruction set available for intel processors that do not support avx512 and avx512_vnni. Please note that this build config **does not support Intel GPU**. For Intel GPU support, please refer to [llama.cpp for SYCL](./README-sycl.md). + - Using manual oneAPI installation: By default, `LLAMA_BLAS_VENDOR` is set to `Generic`, so if you already sourced intel environment script and assign `-DLLAMA_BLAS=ON` in cmake, the mkl version of Blas will automatically been selected. Otherwise please install oneAPI and follow the below steps: ```bash mkdir build cd build - source /opt/intel/oneapi/setvars.sh # You can skip this step if in oneapi-runtime docker image, only required for manual installation + source /opt/intel/oneapi/setvars.sh # You can skip this step if in oneapi-basekit docker image, only required for manual installation cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_NATIVE=ON cmake --build . --config Release ``` - Using oneAPI docker image: - If you do not want to source the environment vars and install oneAPI manually, you can also build the code using intel docker container: [oneAPI-runtime](https://hub.docker.com/r/intel/oneapi-runtime) - - ```bash - mkdir build - cd build - cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_NATIVE=ON - cmake --build . --config Release - ``` - - Building through oneAPI compilers will make avx_vnni instruction set available for intel processors that do not support avx512 and avx512_vnni. + If you do not want to source the environment vars and install oneAPI manually, you can also build the code using intel docker container: [oneAPI-basekit](https://hub.docker.com/r/intel/oneapi-basekit). Then, you can use the commands given above. Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information. @@ -596,34 +614,87 @@ Building the program with BLAS support may lead to some performance improvements You can get a list of platforms and devices from the `clinfo -l` command, etc. -### Prepare Data & Run +- #### Vulkan + + **With docker**: + + You don't need to install Vulkan SDK. It will be installed inside the container. + + ```sh + # Build the image + docker build -t llama-cpp-vulkan -f .devops/main-vulkan.Dockerfile . + + # Then, use it: + docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-vulkan -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 + ``` + + **Without docker**: + + Firstly, you need to make sure you have installed [Vulkan SDK](https://vulkan.lunarg.com/doc/view/latest/linux/getting_started_ubuntu.html) + + For example, on Ubuntu 22.04 (jammy), use the command below: + + ```bash + wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - + wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list + apt update -y + apt-get install -y vulkan-sdk + # To verify the installation, use the command below: + vulkaninfo + ``` + + Alternatively your package manager might be able to provide the appropiate libraries. For example for Ubuntu 22.04 you can install `libvulkan-dev` instead. + + Then, build llama.cpp using the cmake command below: + + ```bash + mkdir -p build + cd build + cmake .. -DLLAMA_VULKAN=1 + cmake --build . --config Release + # Test the output binary (with "-ngl 33" to offload all layers to GPU) + ./bin/main -m "PATH_TO_MODEL" -p "Hi you how are you" -n 50 -e -ngl 33 -t 4 + + # You should see in the output, ggml_vulkan detected your GPU. For example: + # ggml_vulkan: Using Intel(R) Graphics (ADL GT2) | uma: 1 | fp16: 1 | warp size: 32 + ``` + +### Prepare and Quantize + +To obtain the official LLaMA 2 weights please see the Obtaining and using the Facebook LLaMA 2 model section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face. ```bash -# obtain the original LLaMA model weights and place them in ./models +# obtain the official LLaMA model weights and place them in ./models ls ./models -65B 30B 13B 7B tokenizer_checklist.chk tokenizer.model - # [Optional] for models using BPE tokenizers - ls ./models - 65B 30B 13B 7B vocab.json +llama-2-7b tokenizer_checklist.chk tokenizer.model +# [Optional] for models using BPE tokenizers +ls ./models + vocab.json +# [Optional] for PyTorch .bin models like Mistral-7B +ls ./models + # install Python dependencies python3 -m pip install -r requirements.txt -# convert the 7B model to ggml FP16 format -python3 convert.py models/7B/ +# convert the model to ggml FP16 format +python3 convert.py models/mymodel/ - # [Optional] for models using BPE tokenizers - python convert.py models/7B/ --vocabtype bpe +# [Optional] for models using BPE tokenizers +python convert.py models/mymodel/ --vocab-type bpe -# quantize the model to 4-bits (using q4_0 method) -./quantize ./models/7B/ggml-model-f16.gguf ./models/7B/ggml-model-q4_0.gguf q4_0 +# quantize the model to 4-bits (using Q4_K_M method) +./quantize ./models/mymodel/ggml-model-f16.gguf ./models/mymodel/ggml-model-Q4_K_M.gguf Q4_K_M -# update the gguf filetype to current if older version is unsupported by another application -./quantize ./models/7B/ggml-model-q4_0.gguf ./models/7B/ggml-model-q4_0-v2.gguf COPY +# update the gguf filetype to current version if older version is now unsupported +./quantize ./models/mymodel/ggml-model-Q4_K_M.gguf ./models/mymodel/ggml-model-Q4_K_M-v2.gguf COPY +``` +### Run the quantized model -# run the inference -./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 +```bash +# start inference on a gguf model +./main -m ./models/mymodel/ggml-model-Q4_K_M.gguf -n 128 ``` When running the larger models, make sure you have enough disk space to store all the intermediate files. @@ -644,7 +715,7 @@ From the unzipped folder, open a terminal/cmd window here and place a pre-conver As the models are currently fully loaded into memory, you will need adequate disk space to save them and sufficient RAM to load them. At the moment, memory and disk requirements are the same. -| Model | Original size | Quantized size (4-bit) | +| Model | Original size | Quantized size (Q4_0) | |------:|--------------:|-----------------------:| | 7B | 13 GB | 3.9 GB | | 13B | 24 GB | 7.8 GB | @@ -671,9 +742,21 @@ Several quantization methods are supported. They differ in the resulting model d | 13B | bits/weight | 16.0 | 4.5 | 5.0 | 5.5 | 6.0 | 8.5 | - [k-quants](https://github.com/ggerganov/llama.cpp/pull/1684) -- recent k-quants improvements +- recent k-quants improvements and new i-quants - [#2707](https://github.com/ggerganov/llama.cpp/pull/2707) - [#2807](https://github.com/ggerganov/llama.cpp/pull/2807) + - [#4773 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4773) + - [#4856 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4856) + - [#4861 - importance matrix](https://github.com/ggerganov/llama.cpp/pull/4861) + - [#4872 - MoE models](https://github.com/ggerganov/llama.cpp/pull/4872) + - [#4897 - 2-bit quantization](https://github.com/ggerganov/llama.cpp/pull/4897) + - [#4930 - imatrix for all k-quants](https://github.com/ggerganov/llama.cpp/pull/4930) + - [#4951 - imatrix on the GPU](https://github.com/ggerganov/llama.cpp/pull/4957) + - [#4969 - imatrix for legacy quants](https://github.com/ggerganov/llama.cpp/pull/4969) + - [#4996 - k-qunats tuning](https://github.com/ggerganov/llama.cpp/pull/4996) + - [#5060 - Q3_K_XS](https://github.com/ggerganov/llama.cpp/pull/5060) + - [#5196 - 3-bit i-quants](https://github.com/ggerganov/llama.cpp/pull/5196) + - [quantization tuning](https://github.com/ggerganov/llama.cpp/pull/5320), [another one](https://github.com/ggerganov/llama.cpp/pull/5334), and [another one](https://github.com/ggerganov/llama.cpp/pull/5361) ### Perplexity (measuring model quality) @@ -748,9 +831,9 @@ The `grammars/` folder contains a handful of sample grammars. To write your own, For authoring more complex JSON grammars, you can also check out https://grammar.intrinsiclabs.ai/, a browser app that lets you write TypeScript interfaces which it compiles to GBNF grammars that you can save for local use. Note that the app is built and maintained by members of the community, please file any issues or FRs on [its repo](http://github.com/intrinsiclabsai/gbnfgen) and not this one. -### Instruction mode with Alpaca +### Instruct mode -1. First, download the `ggml` Alpaca model into the `./models` folder +1. First, download and place the `ggml` model into the `./models` folder 2. Run the `main` tool like this: ``` @@ -776,50 +859,6 @@ cadaver, cauliflower, cabbage (vegetable), catalpa (tree) and Cailleach. > ``` -### Using [OpenLLaMA](https://github.com/openlm-research/open_llama) - -OpenLLaMA is an openly licensed reproduction of Meta's original LLaMA model. It uses the same architecture and is a drop-in replacement for the original LLaMA weights. - -- Download the [3B](https://huggingface.co/openlm-research/open_llama_3b), [7B](https://huggingface.co/openlm-research/open_llama_7b), or [13B](https://huggingface.co/openlm-research/open_llama_13b) model from Hugging Face. -- Convert the model to ggml FP16 format using `python convert.py ` - -### Using [GPT4All](https://github.com/nomic-ai/gpt4all) - -*Note: these instructions are likely obsoleted by the GGUF update* - -- Obtain the `tokenizer.model` file from LLaMA model and put it to `models` -- Obtain the `added_tokens.json` file from Alpaca model and put it to `models` -- Obtain the `gpt4all-lora-quantized.bin` file from GPT4All model and put it to `models/gpt4all-7B` -- It is distributed in the old `ggml` format which is now obsoleted -- You have to convert it to the new format using `convert.py`: - -```bash -python3 convert.py models/gpt4all-7B/gpt4all-lora-quantized.bin -``` - -- You can now use the newly generated `models/gpt4all-7B/ggml-model-q4_0.bin` model in exactly the same way as all other models - -- The newer GPT4All-J model is not yet supported! - -### Using Pygmalion 7B & Metharme 7B - -- Obtain the [LLaMA weights](#obtaining-the-facebook-llama-original-model-and-stanford-alpaca-model-data) -- Obtain the [Pygmalion 7B](https://huggingface.co/PygmalionAI/pygmalion-7b/) or [Metharme 7B](https://huggingface.co/PygmalionAI/metharme-7b) XOR encoded weights -- Convert the LLaMA model with [the latest HF convert script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py) -- Merge the XOR files with the converted LLaMA weights by running the [xor_codec](https://huggingface.co/PygmalionAI/pygmalion-7b/blob/main/xor_codec.py) script -- Convert to `ggml` format using the `convert.py` script in this repo: -```bash -python3 convert.py pygmalion-7b/ --outtype q4_1 -``` -> The Pygmalion 7B & Metharme 7B weights are saved in [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) precision. If you wish to convert to `ggml` without quantizating, please specify the `--outtype` as `f32` instead of `f16`. - - -### Obtaining the Facebook LLaMA original model and Stanford Alpaca model data - -- **Under no circumstances should IPFS, magnet links, or any other links to model downloads be shared anywhere in this repository, including in issues, discussions, or pull requests. They will be immediately deleted.** -- The LLaMA models are officially distributed by Facebook and will **never** be provided through this repository. -- Refer to [Facebook's LLaMA repository](https://github.com/facebookresearch/llama/pull/73/files) if you need to request access to the model data. - ### Obtaining and using the Facebook LLaMA 2 model - Refer to [Facebook's LLaMA download page](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) if you want to access the model data. @@ -831,20 +870,6 @@ python3 convert.py pygmalion-7b/ --outtype q4_1 - [LLaMA 2 13B chat](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF) - [LLaMA 2 70B chat](https://huggingface.co/TheBloke/Llama-2-70B-chat-GGUF) -### Verifying the model files - -Please verify the [sha256 checksums](SHA256SUMS) of all downloaded model files to confirm that you have the correct model data files before creating an issue relating to your model files. -- The following python script will verify if you have all possible latest files in your self-installed `./models` subdirectory: - -```bash -# run the verification script -./scripts/verify-checksum-models.py -``` - -- On linux or macOS it is also possible to run the following commands to verify if you have all possible latest files in your self-installed `./models` subdirectory: - - On Linux: `sha256sum --ignore-missing -c SHA256SUMS` - - on macOS: `shasum -a 256 --ignore-missing -c SHA256SUMS` - ### Seminal papers and background on the models If your issue is with model generation quality, then please at least scan the following links and papers to understand the limitations of LLaMA models. This is especially important when choosing an appropriate model size and appreciating both the significant and subtle differences between LLaMA models and ChatGPT: @@ -929,17 +954,20 @@ Place your desired model into the `~/llama.cpp/models/` directory and execute th * Create a folder to store big models & intermediate files (ex. /llama/models) #### Images -We have two Docker images available for this project: +We have three Docker images available for this project: 1. `ghcr.io/ggerganov/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`) 2. `ghcr.io/ggerganov/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`) +3. `ghcr.io/ggerganov/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`) Additionally, there the following images, similar to the above: - `ghcr.io/ggerganov/llama.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`) - `ghcr.io/ggerganov/llama.cpp:light-cuda`: Same as `light` but compiled with CUDA support. (platforms: `linux/amd64`) +- `ghcr.io/ggerganov/llama.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`) - `ghcr.io/ggerganov/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) - `ghcr.io/ggerganov/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) +- `ghcr.io/ggerganov/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`) The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](.github/workflows/docker.yml). If you need different settings (for example, a different CUDA or ROCm library, you'll need to build the images locally for now). @@ -965,6 +993,12 @@ or with a light image: docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 ``` +or with a server image: + +```bash +docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggerganov/llama.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 +``` + ### Docker With CUDA Assuming one has the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit) properly installed on Linux, or is using a GPU enabled cloud, `cuBLAS` should be accessible inside the container. @@ -974,6 +1008,7 @@ Assuming one has the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia ```bash docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile . docker build -t local/llama.cpp:light-cuda -f .devops/main-cuda.Dockerfile . +docker build -t local/llama.cpp:server-cuda -f .devops/server-cuda.Dockerfile . ``` You may want to pass in some different `ARGS`, depending on the CUDA environment supported by your container host, as well as the GPU architecture. @@ -987,6 +1022,7 @@ The resulting images, are essentially the same as the non-CUDA images: 1. `local/llama.cpp:full-cuda`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. 2. `local/llama.cpp:light-cuda`: This image only includes the main executable file. +3. `local/llama.cpp:server-cuda`: This image only includes the server executable file. #### Usage @@ -995,6 +1031,7 @@ After building locally, Usage is similar to the non-CUDA examples, but you'll ne ```bash docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 +docker run --gpus all -v /path/to/models:/models local/llama.cpp:server-cuda -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1 ``` ### Contributing diff --git a/SHA256SUMS b/SHA256SUMS deleted file mode 100644 index ca4d5a4a5..000000000 --- a/SHA256SUMS +++ /dev/null @@ -1,40 +0,0 @@ -700df0d3013b703a806d2ae7f1bfb8e59814e3d06ae78be0c66368a50059f33d models/7B/consolidated.00.pth -666a4bb533b303bdaf89e1b6a3b6f93535d868de31d903afdc20983dc526c847 models/7B/ggml-model-f16.bin -ec2f2d1f0dfb73b72a4cbac7fa121abbe04c37ab327125a38248f930c0f09ddf models/7B/ggml-model-q4_0.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/7B/ggml-model-q4_1.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/7B/ggml-model-q5_0.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/7B/ggml-model-q5_1.bin -7e89e242ddc0dd6f060b43ca219ce8b3e8f08959a72cb3c0855df8bb04d46265 models/7B/params.json -745bf4e29a4dd6f411e72976d92b452da1b49168a4f41c951cfcc8051823cf08 models/13B/consolidated.00.pth -d5ccbcc465c71c0de439a5aeffebe8344c68a519bce70bc7f9f92654ee567085 models/13B/consolidated.01.pth -2b206e9b21fb1076f11cafc624e2af97c9e48ea09312a0962153acc20d45f808 models/13B/ggml-model-f16.bin -fad169e6f0f575402cf75945961cb4a8ecd824ba4da6be2af831f320c4348fa5 models/13B/ggml-model-q4_0.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/13B/ggml-model-q4_1.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/13B/ggml-model-q5_0.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/13B/ggml-model-q5_1.bin -4ab77bec4d4405ccb66a97b282574c89a94417e3c32e5f68f37e2876fc21322f models/13B/params.json -e23294a58552d8cdec5b7e8abb87993b97ea6eced4178ff2697c02472539d067 models/30B/consolidated.00.pth -4e077b7136c7ae2302e954860cf64930458d3076fcde9443f4d0e939e95903ff models/30B/consolidated.01.pth -24a87f01028cbd3a12de551dcedb712346c0b5cbdeff1454e0ddf2df9b675378 models/30B/consolidated.02.pth -1adfcef71420886119544949767f6a56cb6339b4d5fcde755d80fe68b49de93b models/30B/consolidated.03.pth -7e1b524061a9f4b27c22a12d6d2a5bf13b8ebbea73e99f218809351ed9cf7d37 models/30B/ggml-model-f16.bin -d2a441403944819492ec8c2002cc36fa38468149bfb4b7b4c52afc7bd9a7166d models/30B/ggml-model-q4_0.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/30B/ggml-model-q4_1.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/30B/ggml-model-q5_0.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/30B/ggml-model-q5_1.bin -2c07118ea98d69dbe7810d88520e30288fa994751b337f8fca02b171955f44cb models/30B/params.json -135c563f6b3938114458183afb01adc9a63bef3d8ff7cccc3977e5d3664ecafe models/65B/consolidated.00.pth -9a600b37b19d38c7e43809485f70d17d1dc12206c07efa83bc72bb498a568bde models/65B/consolidated.01.pth -e7babf7c5606f165a3756f527cb0fedc4f83e67ef1290391e52fb1cce5f26770 models/65B/consolidated.02.pth -73176ffb426b40482f2aa67ae1217ef79fbbd1fff5482bae5060cdc5a24ab70e models/65B/consolidated.03.pth -882e6431d0b08a8bc66261a0d3607da21cbaeafa96a24e7e59777632dbdac225 models/65B/consolidated.04.pth -a287c0dfe49081626567c7fe87f74cce5831f58e459b427b5e05567641f47b78 models/65B/consolidated.05.pth -72b4eba67a1a3b18cb67a85b70f8f1640caae9b40033ea943fb166bd80a7b36b models/65B/consolidated.06.pth -d27f5b0677d7ff129ceacd73fd461c4d06910ad7787cf217b249948c3f3bc638 models/65B/consolidated.07.pth -60758f2384d74e423dffddfd020ffed9d3bb186ebc54506f9c4a787d0f5367b0 models/65B/ggml-model-f16.bin -cde053439fa4910ae454407e2717cc46cc2c2b4995c00c93297a2b52e790fa92 models/65B/ggml-model-q4_0.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/65B/ggml-model-q4_1.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/65B/ggml-model-q5_0.bin -ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff models/65B/ggml-model-q5_1.bin -999ed1659b469ccc2a941714c0a9656fa571d17c9f7c8c7589817ca90edef51b models/65B/params.json -9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347 models/tokenizer.model diff --git a/SatelliteScrape.ipynb b/SatelliteScrape.ipynb new file mode 100644 index 000000000..dfe2ea3fa --- /dev/null +++ b/SatelliteScrape.ipynb @@ -0,0 +1,174 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: pystac_client in ./.conda/lib/python3.11/site-packages (0.7.5)\n", + "Requirement already satisfied: requests>=2.28.2 in ./.conda/lib/python3.11/site-packages (from pystac_client) (2.31.0)\n", + "Requirement already satisfied: pystac>=1.8.2 in ./.conda/lib/python3.11/site-packages (from pystac[validation]>=1.8.2->pystac_client) (1.9.0)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in ./.conda/lib/python3.11/site-packages (from pystac_client) (2.8.2)\n", + "Requirement already satisfied: jsonschema~=4.18 in ./.conda/lib/python3.11/site-packages (from pystac[validation]>=1.8.2->pystac_client) (4.20.0)\n", + "Requirement already satisfied: six>=1.5 in ./.conda/lib/python3.11/site-packages (from python-dateutil>=2.8.2->pystac_client) (1.16.0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in ./.conda/lib/python3.11/site-packages (from requests>=2.28.2->pystac_client) (3.3.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in ./.conda/lib/python3.11/site-packages (from requests>=2.28.2->pystac_client) (3.6)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in ./.conda/lib/python3.11/site-packages (from requests>=2.28.2->pystac_client) (2.1.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in ./.conda/lib/python3.11/site-packages (from requests>=2.28.2->pystac_client) (2023.11.17)\n", + "Requirement already satisfied: attrs>=22.2.0 in ./.conda/lib/python3.11/site-packages (from jsonschema~=4.18->pystac[validation]>=1.8.2->pystac_client) (23.2.0)\n", + "Requirement already satisfied: jsonschema-specifications>=2023.03.6 in ./.conda/lib/python3.11/site-packages (from jsonschema~=4.18->pystac[validation]>=1.8.2->pystac_client) (2023.12.1)\n", + "Requirement already satisfied: referencing>=0.28.4 in ./.conda/lib/python3.11/site-packages (from jsonschema~=4.18->pystac[validation]>=1.8.2->pystac_client) (0.32.1)\n", + "Requirement already satisfied: rpds-py>=0.7.1 in ./.conda/lib/python3.11/site-packages (from jsonschema~=4.18->pystac[validation]>=1.8.2->pystac_client) (0.16.2)\n", + "Requirement already satisfied: odc.stac in ./.conda/lib/python3.11/site-packages (0.3.8)\n", + "Requirement already satisfied: affine in ./.conda/lib/python3.11/site-packages (from odc.stac) (2.4.0)\n", + "Requirement already satisfied: odc-geo>=0.3.0 in ./.conda/lib/python3.11/site-packages (from odc.stac) (0.4.1)\n", + "Requirement already satisfied: rasterio!=1.3.0,!=1.3.1,>=1.0.0 in ./.conda/lib/python3.11/site-packages (from odc.stac) (1.3.9)\n", + "Requirement already satisfied: dask[array] in ./.conda/lib/python3.11/site-packages (from odc.stac) (2023.12.1)\n", + "Requirement already satisfied: numpy>=1.20.0 in ./.conda/lib/python3.11/site-packages (from odc.stac) (1.26.3)\n", + "Requirement already satisfied: pandas in ./.conda/lib/python3.11/site-packages (from odc.stac) (2.1.4)\n", + "Requirement already satisfied: pystac<2,>=1.0.0 in ./.conda/lib/python3.11/site-packages (from odc.stac) (1.9.0)\n", + "Requirement already satisfied: toolz in ./.conda/lib/python3.11/site-packages (from odc.stac) (0.12.0)\n", + "Requirement already satisfied: xarray>=0.19 in ./.conda/lib/python3.11/site-packages (from odc.stac) (2023.12.0)\n", + "Requirement already satisfied: cachetools in ./.conda/lib/python3.11/site-packages (from odc-geo>=0.3.0->odc.stac) (5.3.2)\n", + "Requirement already satisfied: pyproj>=3.0.0 in ./.conda/lib/python3.11/site-packages (from odc-geo>=0.3.0->odc.stac) (3.6.1)\n", + "Requirement already satisfied: shapely in ./.conda/lib/python3.11/site-packages (from odc-geo>=0.3.0->odc.stac) (2.0.2)\n", + "Requirement already satisfied: python-dateutil>=2.7.0 in ./.conda/lib/python3.11/site-packages (from pystac<2,>=1.0.0->odc.stac) (2.8.2)\n", + "Requirement already satisfied: attrs in ./.conda/lib/python3.11/site-packages (from rasterio!=1.3.0,!=1.3.1,>=1.0.0->odc.stac) (23.2.0)\n", + "Requirement already satisfied: certifi in ./.conda/lib/python3.11/site-packages (from rasterio!=1.3.0,!=1.3.1,>=1.0.0->odc.stac) (2023.11.17)\n", + "Requirement already satisfied: click>=4.0 in ./.conda/lib/python3.11/site-packages (from rasterio!=1.3.0,!=1.3.1,>=1.0.0->odc.stac) (8.1.7)\n", + "Requirement already satisfied: cligj>=0.5 in ./.conda/lib/python3.11/site-packages (from rasterio!=1.3.0,!=1.3.1,>=1.0.0->odc.stac) (0.7.2)\n", + "Requirement already satisfied: snuggs>=1.4.1 in ./.conda/lib/python3.11/site-packages (from rasterio!=1.3.0,!=1.3.1,>=1.0.0->odc.stac) (1.4.7)\n", + "Requirement already satisfied: click-plugins in ./.conda/lib/python3.11/site-packages (from rasterio!=1.3.0,!=1.3.1,>=1.0.0->odc.stac) (1.1.1)\n", + "Requirement already satisfied: setuptools in ./.conda/lib/python3.11/site-packages (from rasterio!=1.3.0,!=1.3.1,>=1.0.0->odc.stac) (68.2.2)\n", + "Requirement already satisfied: packaging>=21.3 in ./.conda/lib/python3.11/site-packages (from xarray>=0.19->odc.stac) (23.2)\n", + "Requirement already satisfied: pytz>=2020.1 in ./.conda/lib/python3.11/site-packages (from pandas->odc.stac) (2023.3.post1)\n", + "Requirement already satisfied: tzdata>=2022.1 in ./.conda/lib/python3.11/site-packages (from pandas->odc.stac) (2023.4)\n", + "Requirement already satisfied: cloudpickle>=1.5.0 in ./.conda/lib/python3.11/site-packages (from dask[array]->odc.stac) (3.0.0)\n", + "Requirement already satisfied: fsspec>=2021.09.0 in ./.conda/lib/python3.11/site-packages (from dask[array]->odc.stac) (2023.12.2)\n", + "Requirement already satisfied: partd>=1.2.0 in ./.conda/lib/python3.11/site-packages (from dask[array]->odc.stac) (1.4.1)\n", + "Requirement already satisfied: pyyaml>=5.3.1 in ./.conda/lib/python3.11/site-packages (from dask[array]->odc.stac) (6.0.1)\n", + "Requirement already satisfied: importlib-metadata>=4.13.0 in ./.conda/lib/python3.11/site-packages (from dask[array]->odc.stac) (7.0.1)\n", + "Requirement already satisfied: zipp>=0.5 in ./.conda/lib/python3.11/site-packages (from importlib-metadata>=4.13.0->dask[array]->odc.stac) (3.17.0)\n", + "Requirement already satisfied: locket in ./.conda/lib/python3.11/site-packages (from partd>=1.2.0->dask[array]->odc.stac) (1.0.0)\n", + "Requirement already satisfied: six>=1.5 in ./.conda/lib/python3.11/site-packages (from python-dateutil>=2.7.0->pystac<2,>=1.0.0->odc.stac) (1.16.0)\n", + "Requirement already satisfied: pyparsing>=2.1.6 in ./.conda/lib/python3.11/site-packages (from snuggs>=1.4.1->rasterio!=1.3.0,!=1.3.1,>=1.0.0->odc.stac) (3.1.1)\n", + "Collecting matplotlib\n", + " Downloading matplotlib-3.8.2-cp311-cp311-macosx_11_0_arm64.whl.metadata (5.8 kB)\n", + "Collecting contourpy>=1.0.1 (from matplotlib)\n", + " Downloading contourpy-1.2.0-cp311-cp311-macosx_11_0_arm64.whl.metadata (5.8 kB)\n", + "Collecting cycler>=0.10 (from matplotlib)\n", + " Downloading cycler-0.12.1-py3-none-any.whl.metadata (3.8 kB)\n", + "Collecting fonttools>=4.22.0 (from matplotlib)\n", + " Downloading fonttools-4.47.0-cp311-cp311-macosx_10_9_universal2.whl.metadata (157 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m157.2/157.2 kB\u001b[0m \u001b[31m2.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hCollecting kiwisolver>=1.3.1 (from matplotlib)\n", + " Downloading kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl.metadata (6.4 kB)\n", + "Requirement already satisfied: numpy<2,>=1.21 in ./.conda/lib/python3.11/site-packages (from matplotlib) (1.26.3)\n", + "Requirement already satisfied: packaging>=20.0 in ./.conda/lib/python3.11/site-packages (from matplotlib) (23.2)\n", + "Collecting pillow>=8 (from matplotlib)\n", + " Downloading pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl.metadata (9.7 kB)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in ./.conda/lib/python3.11/site-packages (from matplotlib) (3.1.1)\n", + "Requirement already satisfied: python-dateutil>=2.7 in ./.conda/lib/python3.11/site-packages (from matplotlib) (2.8.2)\n", + "Requirement already satisfied: six>=1.5 in ./.conda/lib/python3.11/site-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)\n", + "Downloading matplotlib-3.8.2-cp311-cp311-macosx_11_0_arm64.whl (7.5 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.5/7.5 MB\u001b[0m \u001b[31m10.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n", + "\u001b[?25hDownloading contourpy-1.2.0-cp311-cp311-macosx_11_0_arm64.whl (243 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m243.4/243.4 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading cycler-0.12.1-py3-none-any.whl (8.3 kB)\n", + "Downloading fonttools-4.47.0-cp311-cp311-macosx_10_9_universal2.whl (2.8 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.8/2.8 MB\u001b[0m \u001b[31m11.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n", + "\u001b[?25hDownloading kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl (66 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m66.2/66.2 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl (3.3 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.3/3.3 MB\u001b[0m \u001b[31m9.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0mm\n", + "\u001b[?25hInstalling collected packages: pillow, kiwisolver, fonttools, cycler, contourpy, matplotlib\n", + "Successfully installed contourpy-1.2.0 cycler-0.12.1 fonttools-4.47.0 kiwisolver-1.4.5 matplotlib-3.8.2 pillow-10.2.0\n" + ] + } + ], + "source": [ + "!pip3 install pystac_client\n", + "!pip3 install odc.stac\n", + "!pip3 install matplotlib" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from pystac_client import Client\n", + "from odc.stac import load" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Example: The bounding box of the New Zealand Exclusive Economic Zone in WGS 84 (from 160.6°E to 170°W and from 55.95°S to 25.89°S) would be represented in JSON as [160.6, -55.95, -170, -25.89] and in a query as bbox=160.6,-55.95,-170,-25.89." + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAkgAAAHHCAYAAABEEKc/AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOz9d5wkV3nvjz/dXdU5T0/c2aScQIkgJBQAEYXAYCEjA0IYE0TmXsBES7JJAgTYliXANsJEC/iBySDhCwYDV2QbgbJ2tWFiz3SO1V31+0N3Rzv7eUva1Qow/vZ5vfaPPdPdVXXqnCd+ns8TCoIgsNEYjdEYjdEYjdEYjdFYG+Hf9w2MxmiMxmiMxmiMxmj8dxsjA2k0RmM0RmM0RmM0RmOfMTKQRmM0RmM0RmM0RmM09hkjA2k0RmM0RmM0RmM0RmOfMTKQRmM0RmM0RmM0RmM09hkjA2k0RmM0RmM0RmM0RmOfMTKQRmM0RmM0RmM0RmM09hkjA2k0RmM0RmM0RmM0RmOfMTKQRmM0RmM0RmM0RmM09hkjA2k0HrRx6aWXWigUekDfPeuss+yss856cG9oP0az2bQ///M/t6mpKQuFQvaa17zmd34Po3HPuOiii2zLli2/79sYjdEYjdEYGUijcWCj3W7bpZdeat/97nd/37fyoIx3vvOd9rGPfcwuvvhi+8QnPmHPe97zfifX/Yd/+Ac788wzbXJy0mKxmG3dutVe8IIX2Pbt29d9bufOnXbZZZfZIx7xCCsUClYqleyss86yb3/72/f629/+9rftsY99rOVyOctkMnbyySfbtddeu+4zr33ta+2kk06yYrFoyWTSjj76aLv00kut2Wyu+1yz2bRLLrnEnvSkJ1mxWLRQKGQf+9jHDurZ5+bm7NJLL7Vf/vKXB/U7f2jjQN9ltVq1F7/4xTY+Pm6pVMoe85jH2M9//vN1n1lZWbH3vve9dsYZZ9j4+Ljl83k75ZRT5H2bmf3617+2Zz3rWXbIIYdYMpm0UqlkZ5xxhn3lK1/Z72e47rrr7IUvfKEdd9xxFolE7tOY9X3f3vOe99jWrVstHo/bQx/6UPvMZz6z39cyM/vhD39oj370oy2ZTNrU1JS96lWvkj1qZnbbbbfZs5/9bJudnbVkMmlHHXWU/dVf/ZW1220zu8d5u79/e5y0W265xV772tfaqaeeavF43EKhkJzNPePaa6+15z73uXb44Yev+439HQdyLTOzRqNhb3jDG2zr1q0Wi8Vsw4YNdt555609670N3/ftYx/7mD3taU+zjRs3WiqVsuOOO87e/va3W7fblc8vLi7aC17wApuYmLBEImEnnXSSfe5znzugZ/uDH8FojMYBjOXl5cDMgksuuUT+5nle0Ol0HtDvnnnmmcGZZ555cDf3AMYjH/nI4LTTTvudX/fiiy8Onv/85wfve9/7gn/6p38K3vrWtwaTk5NBqVQKdu/evfa5v/u7vwsSiURwwQUXBFdeeWXwwQ9+MDjppJMCMws++tGPyu9+9KMfDUKhUPCEJzwhuPLKK4Orr746eM1rXhO8973vXfe50047LXjVq14V/O3f/m3wkY98JLj44ouDWCwWnHbaacFwOFz73LZt2wIzCzZt2hScddZZgZkF11xzzUE9+09+8pN7/Z1+vx90u92D+v3/ruNA3uVwOAxOPfXUIJVKBZdeemlw5ZVXBsccc0yQyWSCW2+9de1zX/nKVwLXdYOnP/3pwQc/+MHgyiuvDB7zmMcEZhb85V/+5brf/NrXvhY88YlPDC699NLgIx/5SPDBD34wOP300wMzCz784Q/v1zM8//nPD+LxeHDqqacGs7OzwebNm+/1s2984xsDMwte9KIXBR/5yEeCc845JzCz4DOf+cx+XesXv/hFEI/HgxNPPDG4+uqrg7e85S1BLBYLnvSkJ6373I4dO4J8Ph9s3rw5eNe73hV8+MMfDi666KLAzIKnPe1pQRAEwX/+538Gn/jEJ9b+XX311YGZBc94xjPWzV933XVBEATBNddcE4TD4eC4444LTjjhhMDMgm3btuF9nnnmmUE6nQ4e85jHBIVC4YDl2IFcq1qtBscff3wwNjYWvOlNbwr+6Z/+KXj3u98dnHPOOcHq6up9XqfRaARmFpxyyinB29/+9uAjH/lI8IIXvCAIh8PBWWedFfi+v/bZWq0WHHbYYUEmkwne+ta3BldeeWVwxhlnBGYWfOpTnzqg5/tDHiMDaTQOaNyXgXQw48EwkHzfD9rt9gF9Z+vWrcE555xzUNd9sMZPf/rTwMyCd73rXWtzN954Y7C8vLzuc91uNzjqqKOC2dnZdfPbtm0LEolE8KpXveoBXf9973tfYGbBj370o3XXmp+fD4Lgvg2bAxkP1u/8oY0DeZfXXnttYGbB5z73ubW5paWlIJ/PBxdccMHa3J133hls37593Xd93w8e+9jHBrFYLGg2m/d5T4PBIDj++OODI488cr+eYffu3UG/3w+CIAjOOeecezWQdu3aFbiuG7z85S9fd1+nn356MDs7GwwGg/u91pOf/ORgeno6qNVqa3P/8A//EJhZ8K1vfWtt7h3veEdgZsGNN9647vsXXnhhYGZoONyfHFtZWQnq9XoQBEHw3ve+9z6Nlh07dqw5Fccee+wBy7EDudbFF18c5PP54M477zygawRBEPR6veAHP/iBzF922WWBmQXXX3/92tx73vOewMyCf/u3f1ubGw6HwcMf/vBgamoq6PV6B3z9P8QxSrH9AY9Go2Gvec1rbMuWLRaLxWxiYsIe//jHrwvDn3XWWXbcccfZz372Mzv11FMtkUjY1q1b7UMf+tC63+r3+/aXf/mXdvLJJ1sul7NUKmWnn366fec731n7zPbt2218fNzMzC677LK1sPSll15qZoxBuuaaa+yxj32sTUxMWCwWs2OOOcauvvrqB+X5t2zZYk996lPtW9/6lj3sYQ+zRCJhH/7wh83s7vTEa17zGtu4caPFYjE77LDD7PLLLzff983M7Lvf/a6FQiHbtm2bfe1rX1t7lvsKbf+2x550RbVaXZs79thjrVQqrftcLBazpzzlKbZr1y5rNBpr8x/60IdsOBzaX/3VX5nZ3emxIAgO6vqxWMympqYO7EHuY3z3u9+1hz/84WZm9oIXvGBt3fek7fbFIG3fvt1CoZC9733vs7//+79fSw894QlPsJ07d1oQBPbXf/3XNjs7a4lEwp7+9Kfb6uqqXPcb3/iGnX766ZZKpSyTydg555xjv/71rx+059qfcSDv8vOf/7xNTk7aM5/5zLW58fFxO//88+1LX/qS9Xo9MzPbunWrbd68ed1vhkIh+6M/+iPr9Xp255133uc9RSIR27hx47p3fl9jZmbGXNe938996UtfMs/z7GUve9m6+7r44ott165d9qMf/eg+v1+v1+3666+35z73uZbNZtfmL7zwQkun0/bZz3523WfNzCYnJ9f9xvT0tIXDYYtGo/v1bHuPYrFomUxmvz67ceNGC4f3T5XefPPNtmPHjgd0rWq1atdcc429+MUvtq1bt1q/31/bB/sOz/Ps5ptvtvn5+bW5aDRqp556qnz2Gc94hpmZ3XTTTWtz3//+9218fNwe+9jHrs2Fw2E7//zzbWFhwf793//9fu/3f8IYGUh/wOOlL32pXX311fbHf/zHdtVVV9nrXvc6SyQS6za6mVmlUrGnPOUpdvLJJ9t73vMem52dtYsvvtg++tGPrn2mXq/bP/7jP9pZZ51ll19+uV166aW2vLxsT3ziE9ewIuPj42vGzTOe8Qz7xCc+YZ/4xCfWCfF9x9VXX22bN2+2N7/5zXbFFVfYxo0b7WUve5n9/d///YOyBrfccotdcMEF9vjHP97+5m/+xk444QRrt9t25pln2ic/+Um78MIL7W//9m/ttNNOsze96U32v/7X/zIzs6OPPto+8YlPWKlUshNOOGHtWfYYgDRqtZqVy+X7/UcYiXsbKysrtrS0ZD/96U/tBS94gZmZPe5xj7vf7y0sLFgymbRkMrk29+1vf9uOOuoo+/rXv26zs7OWyWRsbGzM3va2t60ZhnuPwWBg5XLZ5ubm7LrrrrO3vvWtlslk7BGPeMR+3/+BjqOPPnrNgHvxi1+8tu5nnHHGfX7vU5/6lF111VX2yle+0v73//7f9u///u92/vnn21vf+lb75je/aX/xF39hL37xi+0rX/mKve51r1v33U984hN2zjnnWDqdtssvv9ze9ra32W9+8xt79KMffb8Gse/7+/XOy+WyeZ73gNaE3uUvfvELO+mkk0TxPuIRj7B2u2233nrr/f6mmYlBZmbWarWsXC7bHXfcYR/4wAfsG9/4xn7tuQMZv/jFLyyVStnRRx+9bn7P3vrFL35xn9//1a9+ZYPBwB72sIetm49Go3bCCSes+/4ezM8LX/hC++Uvf2k7d+60a6+91q6++mp71ateZalU6kF4ogdnHH300XbhhRc+oO/+x3/8h3W7XTvssMPsvPPOs2QyaYlEwk477TTB8+3evduOPvpoe9Ob3nS/v0t7pdfrWSKRkM/u2aM/+9nPHtAz/MGN33cIazQe+MjlcutC2DTOPPPMwMyCK664Ym2u1+sFJ5xwQjAxMbEWLh8MBhI2rVQqweTkZPBnf/Zna3P3FZq+5JJLgn23FKW8nvjEJwaHHHKI3OeBhqY3b94cmFnwzW9+c938X//1XwepVGodViMI7sZERCKRYMeOHet+Y39TbHvW8v7+Pf/5z9/vZ4jFYmvfGxsbC/72b//2fr9z2223BfF4PHje8563bj6bzQaFQiGIxWLB2972tuDzn/988Kd/+qeBmQVvfOMb5Xd+9KMfrbvvI488MvjOd75zr9f9XaTYnv/8569L2+zBQI2PjwfVanVt/k1velNgZsHxxx8feJ63Nn/BBRcE0Wh0DcfUaDSCfD4fvOhFL1p3nYWFhSCXy8n8vmPP9ffn332t3b2Ne3uXqVRq3bnbM772ta/hnt97rKysBBMTE8Hpp5+Of3/JS16yds/hcDg477zz7he/QuO+UmznnHOOnPEgCIJWq3Wv+3Hv8bnPfS4ws+B73/ue/O1Zz3pWMDU1tW7ur//6r4NEIrHufbzlLW+5198/EKjA/aW99h73l2Izs/v8+31d6/3vf/+anHjEIx4RfOpTnwquuuqqYHJyMigUCsHc3NzaZ/fs2/2RRWeffXaQzWaDSqWyNvfKV74yCIfDkr599rOfHZhZ8IpXvOJ+f/d/wnAefJNrNH5XI5/P2w033GBzc3M2MzNzr59zHMde8pKXrP0/Go3aS17yErv44ovtZz/7mZ1yyikWiUQsEomY2d1ec7VaNd/37WEPe5hUzhzI2NsLqdVq5nmenXnmmfatb33LarWa5XK5B/zbZnenGJ74xCeum/vc5z5np59+uhUKBSuXy2vzZ599tr373e+2733ve/ac5zzngK91xRVXWKVSud/P3de72Hd84xvfsG63azfddJN98pOftFardZ+fb7fb9qxnPcsSiYS9+93vXve3ZrNpvu/bu9/9bvuLv/gLMzP74z/+Y1tdXbW/+Zu/sTe/+c3rQvnHHHOMXX/99dZqteyHP/yhffvb3z6g6NfvcjzrWc9at1ce+chHmpnZc5/7XHMcZ938Zz7zGdu9e7cdcsghdv3111u1WrULLrhg3V6IRCL2yEc+cl0KmcbU1JRdf/31+3WPxx9//IE80n2+y06nY7FYTL4Tj8fX/k7D9317znOeY9Vq1f7u7/4OP/Oa17zGzjvvPJubm7PPfvazNhwOrd/vH9C93994oPe/9/fN7F5/Y9/vb9myxc444wz74z/+YxsbG7Ovfe1r9s53vtOmpqbsFa94xQN9jAd9BAeQ8t537DmboVDI/u3f/s3S6bSZmZ144on2qEc9yv7+7//e3v72t5vZ3euxP9d65zvfad/+9rftqquusnw+vzb/53/+5/ahD33Izj//fPvABz5gk5OT9tnPfta++MUvmtn9v7//KWNkIB3k+N73vmfvfe977Wc/+5nNz8/bF7/4RfujP/qjA/qNIAjsiiuusI985CN21113WalUspe97GX2lre85T6/9573vMee//zn28aNG+3kk0+2pzzlKXbhhRfaIYccsu5zMzMzEmY+4ogjzOxujMcpp5xiZmb//M//bFdccYXdfPPN69IFW7duPaDn2Xv84Ac/sEsuucR+9KMfSRnqg2Ug7Ttuu+02+6//+q97TZctLS09oGudfPLJD+h79zUe85jHmJnZk5/8ZHv6059uxx13nKXTaRTqw+HQnv3sZ9tvfvMb+8Y3viGGWCKRsFarZRdccMG6+QsuuMC++c1v2i9+8Yt1qaxsNmtnn322mZk9/elPt09/+tP29Kc/3X7+858fsLL/bY9Nmzat+/+efbNx40ac32PI3nbbbWZm67AUe4+98S004vH42ho9mGN/3iXhS/aUY1P6w8zsla98pX3zm9+0j3/84/f6Do866ig76qijzOxuTM8TnvAEO/fcc+2GG26wUChktVptnQKMRqNWLBYP6Pn29/7v7Vp7/n5vv7H38//Lv/yLvfjFL7Zbb73VZmdnzczsmc98pvm+b3/xF39hF1xwgY2NjR3Q/f93HHue+dxzz10zjszMTjnlFNu6dav98Ic/PKDfu/baa+2tb32rvfCFL7SLL7543d8e+tCH2qc//Wl76UtfaqeddpqZ3e0sfPCDH7SLL7543fX/J4+RgXSQo9Vq2fHHH29/9md/dp9YnPsar371q+26666z973vffaQhzzEVldXEWi67zj//PPt9NNPty9+8Yt23XXX2Xvf+167/PLL7Qtf+II9+clPPqB7+OQnP2kXXXSR/dEf/ZG9/vWvt4mJCYtEIvaud73L7rjjjgf0XHfccYc97nGPs6OOOsre//7328aNGy0ajdrXv/51+8AHPoC4mAMdpCh837fHP/7x9oY3vAG/s8c4PNCxurq6X552IpF4QIbfoYceaieeeKJ96lOfQgPpRS96kX31q1+1T33qU6jwZ2Zm7LbbbhOw6sTEhJnZ/Ua/nvnMZ9rznvc8+5d/+Zf/dgbSnujm/s7v8Z737LFPfOITCDbfO/pEYzgc2vLy8n7dY7FY3G9A8P29y+np6XUA2z1jzxxFKS+77DK76qqr7N3vfvcB8Xmdd9559pKXvMRuvfVWO/LII+3Vr361/fM///Pa388888wD5j2bnp6273znOxYEwbrCjX3v/96uNT09ve7ze4/5+fl1z3/VVVfZiSeeuGYc7RlPe9rT7GMf+5j94he/+K0Yub/rseeZ9z3fZnef8f2Jbu8Z119/vV144YV2zjnnSMHOnnHeeefZ0572NPvP//xPGw6HdtJJJ63tgwcqQ//QxshAOsjx5Cc/+T6NkV6vZ295y1vsM5/5jFWrVTvuuOPs8ssvXwMW3nTTTXb11VfbjTfeaEceeaSZHVjEZnp62l72spfZy172MltaWrKTTjrJ3vGOd6y7p7m5OWu1WuuiSHtAnnsqhj7/+c/bIYccYl/4whfWCbRLLrlk3fUOhCn7K1/5ivV6Pfvyl7+8LgJwf2mNgx2HHnqoNZvNB10oPvOZz9yv6o3nP//5D5hMsdPpoNf8+te/3q655hr74Ac/KBGiPePkk0+22267bS29tGfMzc2Zmd0nAN3s7r3q+77VarUHdO/7Ox4o2/oDGYceeqiZ3a1AHsh+2Llz536fx+985zv7RRK4P+/yhBNOsO9///vm+/46oPYNN9xgyWRSFNTf//3f26WXXmqvec1r1tKr+zv2RHD2vPc3vOEN9tznPnft74VC4YB+b8/9/+M//qPddNNNdswxx6y7/z1/v69rHXfcceY4jv30pz+1888/f+3v/X7ffvnLX66bW1xcxHvcEwUfDAYHfP//HceeCPbu3bvlb3Nzc2tRwfsbN9xwgz3jGc+whz3sYfbZz372Pp2EaDS6VnVqZmukpv8TDM79GSMD6bc8XvGKV9hvfvMb+5d/+RebmZmxL37xi/akJz3JfvWrX9nhhx9uX/nKV+yQQw6xr371q/akJz3JgiCws88+297znvfcZ1h7OBxas9lcF6mYmJiwmZkZUbCDwcA+/OEPr1Vw9ft9+/CHP2zj4+Nrh26PJ763x3fDDTfYj370o3XGzZ4qhv0pC977N/eMWq1m11xzzf1+92DG+eefb5deeql961vfEnxStVq1dDp9v5EDGg8WBmkwGFij0RCh/uMf/9h+9atf2Z/+6Z+um3/ve99r73vf++zNb36zvfrVr77X3/2TP/kT+5d/+Rf7p3/6J3vHO95hZndHUK655horFotr77parVoqlZJy7X/8x380M5PKoQd77DHU97e0/GDGE5/4RMtms/bOd77THvOYx8gzLy8v36fh+GBjkPb3XZ533nn2+c9/3r7whS/YeeedZ2Zm5XLZPve5z9m55567Dptz7bXX2qte9Sp7znOeY+9///vv9TeXlpbWool7hud59vGPf9wSicSaIXPMMcesM2oeyHj6059ur33ta+2qq66yK6+80szulgMf+tCHbMOGDWvl5vd2rVwuZ2effbZ98pOftLe97W1r2LlPfOIT1mw27VnPetbaZ4844gi77rrr7NZbb11nOH7mM5+xcDhsD33oQw/qWR7McfPNN1symZSU8f6MI4880o4//nj70pe+ZOVyea3q7LrrrrOdO3faK1/5yrXPep5nd9xxh+VyubVonNndDvk555xjW7Zssa9+9av3mqqlcdttt9mHPvQhe+pTn7punfdUcW7atGlNP7TbbduxY4eVSqV11XEH8/y/jzEykH6LY8eOHXbNNdfYjh071pTm6173OvvmN79p11xzjb3zne+0O++80+666y773Oc+Zx//+MdtOBzaa1/7WjvvvPPs//yf/3Ovv91oNGx2dtbOO+88O/744y2dTtu3v/1t+8lPfmJXXHHFus/OzMzY5Zdfbtu3b7cjjjjCrr32WvvlL39pH/nIR9YUxlOf+lT7whe+YM94xjPsnHPOsW3bttmHPvQhO+aYY9YBd/cI0muvvdaOOOIIKxaLdtxxx9lxxx0n9/iEJzzBotGonXvuufaSl7zEms2m/cM//INNTExg6PzBGq9//evty1/+sj31qU+1iy66yE4++WRrtVr2q1/9yj7/+c/b9u3bsfz5/saDhUFqNpu2ceNG+5M/+RM79thjLZVK2a9+9Su75pprLJfL2dve9ra1z37xi1+0N7zhDXb44Yfb0UcfbZ/85CfX/dbjH//4tZD705/+dHvc4x5n73rXu6xcLtvxxx9v//qv/2r/8R//YR/+8IfXlOp3v/tde9WrXmXnnXeeHX744dbv9+373/++feELX7CHPexh6zx6M7Mrr7zSqtXqWiTqK1/5iu3atcvM7sa87DHSP/axj9kLXvACu+aaa+yiiy661+c/9NBDLZ/P24c+9CHLZDKWSqXskY985EFh3e5tZLNZu/rqq+15z3uenXTSSfbsZz/bxsfHbceOHfa1r33NTjvttDUFTuPBxCAdyLs877zz7JRTTrEXvOAF9pvf/MZKpZJdddVVNhwO7bLLLlv7zo9//GO78MILbWxszB73uMfZpz71qXW/eeqpp65FE1/ykpdYvV63M844wzZs2GALCwv2qU99ym6++Wa74oor9gtX8l//9V/25S9/2czMbr/9dqvVamvA4OOPP97OPfdcMzObnZ2117zmNfbe977XPM+zhz/84fav//qv9v3vf98+9alP3WtqdO/xjne8w0499VQ788wz7cUvfrHt2rXLrrjiCnvCE55gT3rSk9Y+9/rXv36N5+oVr3iFjY2N2Ve/+lX7xje+YX/+539+QEUTe0atVlsDuf/gBz8ws7vPQT6ft3w+vy4F/r3vfc++973vmdndBner1VpbkzPOOGMd7u/oo4+WlOWBXOsDH/iAPf7xj7dHP/rR9pKXvMRqtZq9//3vtyOOOGIdjmhPmf/e0exGo2FPfOITrVKp2Otf/3r72te+tu6ZDz30UHvUox619v9jjjnGnvWsZ9mmTZts27ZtdvXVV1uxWJSU3JVXXmmXXXbZugjqj3/8Y3vMYx5jl1xyyRpP3r09/3/r8Xurn/sfOMws+OIXv7j2/69+9auBmQWpVGrdP8dxgvPPPz8IgiB40YteFJhZcMstt6x972c/+1lgZsHNN998r9fq9XrB61//+uD4448PMplMkEqlguOPPz646qqr1n3uzDPPDI499tjgpz/9afCoRz0qiMfjwebNm4Mrr7xy3ed83w/e+c53Bps3bw5isVhw4oknBl/96lel7DoIguCHP/xhcPLJJwfRaHRdqSyV+X/5y18OHvrQhwbxeDzYsmVLcPnllwcf/ehHpZT1gZb531uJfqPRCN70pjcFhx12WBCNRoNSqRSceuqpwfve9741aoP7+43f1uj1esGrX/3q4KEPfWiQzWYD13WDzZs3By984QulvHfPmt7bv31LyxuNRvDqV786mJqaCqLRaPCQhzwk+OQnP7nuM7fffntw4YUXBoccckiQSCSCeDweHHvsscEll1yCzMt76BTo3973+3d/93f3W4K+Z3zpS18KjjnmmMBxnHUl//dW5r9vq5TvfOc7wjQdBHe3bTCz4Cc/+Yl8/olPfGKQy+WCeDweHHroocFFF10U/PSnP73fe32wxoG+y9XV1eCFL3xhMDY2FiSTyeDMM8+U59rzvPf2b28qhc985jPB2WefHUxOTgaO4wSFQiE4++yzgy996Uv7/Qz3db19S8qHw+GaTIlGo8Gxxx4re/H+xve///3g1FNPDeLxeDA+Ph68/OUvX2Od3nvccMMNwZOf/ORgamoqcF03OOKII4J3vOMd6ygg9h73V+Z/X/QO+8rD+3qv+/6+QZn/gVwrCILg+uuvD0455ZQgHo8HxWIxeN7znrfGdr/vb+79Tu6PsmLf9/fsZz872LhxYxCNRoOZmZngpS99abC4uCj3s+f5996/e87n/jz/f+cRCoKDqDscjXUjFAqtq2K79tpr7TnPeY79+te/Fo8pnU7b1NSUXXLJJfbOd75zXdVYp9OxZDJp1113nT3+8Y8/qHs666yzrFwu24033nhQvzMao3F/4/zzz7ft27fbj3/849/3rYzGaIzGaBz0GKXYfovjxBNPtOFwaEtLS3b66afjZ0477TQbDAZ2xx13rAFK9wCo920hMBqj8d91BEFg3/3udyVtNBqjMRqj8Yc6RgbSQY5ms2m333772v+3bdtmv/zlL61YLNoRRxxhz3nOc+zCCy+0K664wk488URbXl62f/u3f7OHPvShds4559jZZ59tJ510kv3Zn/2ZffCDHzTf9+3lL3+5Pf7xj///TCnlvmN5edmGw+G9/v2B8LKMxm93hEKhB8wvNRqjMRqj8d9xjFJsBzm++93vrpH97T32gOM8z7O3v/3t9vGPf9x2795tpVLJTjnlFLvsssvsIQ95iJndXaL5yle+0q677jpLpVL25Cc/2a644ooHxQj4Q0yxbdmyxe666657/fsfFMhvNEZjNEZjNP4gx8hAGo3/duMHP/jBfVLZFwqF3wqr9WiMxmiMxmiMxp4xMpBGYzRGYzRGYzRGYzT2GeH7/8hojMZojMZojMZojMb/t8YIpP0Ah+/7Njc3Z5lM5nfaOmE0RmM0RmM0RmM0HvgIgsAajYbNzMysa+Wz7xgZSA9wzM3NSSfx0RiN0RiN0RiN0fjDGDt37pQmx3uPkYH0AMee3kCnXjhrTvQeC7Tha3n6oKYdvh1XrdaxsHa3r1hT5ty+9s8ppOIy18/skrlkN6Wfg348k4mYzCVD+mzLNf1cbQBtRIZ5mRqL6L1kIn2ZC6Dkv9XQtZqPa5PXTFe3uF/XtaqMeTKXSuv9JYYtvT/wQEIZmbLynH43l9K1d8erMjcY6Dr3LStzzYbul3Bf768U1c721l+RqYivjWsdRxuDriT0fbiL+t5qGW0xEYT1OaJDBek3BnqODBCUuaxeo+/qujhd+FyiK3Pd3LTMmZlF+vo+o8GczMXT2kS07uszD+Pa5y8Z0X3p9XUdEp4+X6On72QAeyGaKstcp64NXvv1pN5fXJ8j1Nmu95fTfner/arMZUL6bLWsRugTzVWZ6+iUhQFFslDXudmennU3ou/Xn9T9kY2oPGm09Dnavj5HKq0yq2p6LyUVixYN6RkZpPL6weSizi2DDEzpO++Yyh2nrteNhnQfNALdf/2GypNEV+VERUWRdRd0LkrBH70Vy+9z1P2hWfm/7tHj9zZGBtIDHHvSamPhmLnhe1bfz+omG9ugPaZqlR0yV3KUR2Yq7MpcuDcmc81hXeZmnWNlbiLdkDnfVHj1lvVELuX0oPViqmRSwWEyN4zowfBMhchKkJe5EhiTyYQqhCNBSZSzKtDyWf1cDNbAay3LXDim6xIp5WSuV1OKhi1Z7XXlu/p7GyLayPHmnn630tDv+p7ey4zp5zo1VbyFDW2Zc101hpLVDTKXAEPl1nFdv7qnUi5T1T3eiepzRBx9bxEHjIV5fd4ovKNoCQykoa5zsgwKxsyG+0pdMxsktMdfPFBFmxrq+tdMjeVIW+f6Pb2fpuoxHMOQ3vPSLpVZw6a+k/hhOlf3VO7ko/q5XlvPobX13S2B0WqrugZ3xnSvlrKqaDth/Vw+OSFz/bres5PSZ0t2dH8MIiA/E/p+Wz29l46jciJo63eXXJ1rQ31VUFMdkgHDzB2q6q+ZroE70Dm/qvK45ateKaiYMK+h3929qPc3DOueBL/MwGc3BxAvzX0+F/y//98fPGZkIB3k6EfNgr3OeR6CJ5GMchBNFFQQLMDmSTT0YCSG6qV2PDWbl/PqUq2Qee2oB5lNqZeQiqpA21WGHZrdKVOH96BhpK/CphrSA2kdVTAuRZWKOueCsF6CyFWhrAZrt6iGyqCt6xKpqwBPZvResqAQ+kMVkD9v5WWu3lOBYZ3dMlWAhqP9mGrPmaH+XsrR5y1H9PdWA71uz9f9HAHjz1/VyFUjq8bLZF6Nq7an6xftqQc4nNK51bBGZ0KrkzKXmlalGOpq5MTMLPBUfA57ug7NvJ7hXkKlfc/T898FpRUDozBSVwOkDBGGZEK/Gy6qA+ceoXumvetO/a6re3+ppymLZEAhAVXwsYL+Xrup724Momjltn4uFVVncnVV30ekqIaFF+h7T+RUFk039YzEPd37/bDurWhT18Xp6T7w+xrx6QzV4BqoaLMqOJ3DhMqnIUREw6ZRIGdG9+TEks7VanoWElE1pHoeGCl9mAPVMBjoHhpqUM5i+26Xe+chXjdGBtJBjlbVN2evF5fL6iHtBxBhWFYBWQSvcvdQD1CiqL/nQ6pr+6oqmdkYpLUGKpQqWf1ct6dRoH6mKnMbW3ovdUiPxCAi0AMDpOfqNl1Nq3AYQkQqXFPDbAACt1JVQ6V3lM5lTCMgCR8UN5ysjAYX7NaG/l69qcba6kCFdVpllzV6+s5j4KVug7RWCqIVGQrj+2rkVOCdL8E7Sm9SxeFAuqncVoXgQMookVLltLOt+yoHEa7BmP7erp4+b9bjzvOhcT2H1lMl2I/qWW8OICWpy2W1EHwuAsotDYY7RZ8Guj8aYd2/ve36Xd/TRYxGVRslEvog89WqzHVMjboQOEMl2Ec1FUWWieoZGXh6f46rc24NUi1p3ZeTYZXRzTFIa1dUAPQ7illt9FQWRfPwcBGIPoHx19Gfs3BT91BqTO8v5INegRTgcEHfWz2u5zAaUSes3ATBuAGcv5buP+vpdxPjKncyy/B+J9YbZr5n1r5ZL7HvGBlIBzty3rpEaLCkwrRdVGEfAWMj1FbhMJlTb6IHnmsYlEw+o8q3A+kWZ0xD9vFAownVsm7GXFzDy9ku4Iggn7/QVWEdgfz7wCDtkdE1HYR1TeNdFQ6ZoiqT7gZSdnqNIKbedrOgEZU4GBa393VvDFOqnGZygF8AQ69S10hivlaVuV2e3kvMSOmrYOlDpCPZ1xRFFtLDuQF41hBxmAPP0MlBmiam97fQ0T1U2KrXXfJ0XwWLej6Krn6uMQWWqJm5BdVGiX1j+WbWh4jv9pCuw3GQLwAdY0NwpSMhwIrAeWi4qlRjHnj1Td3TUUeNCL+i698Bg9L34CUTdg8iQxVIpw0gStBY1s+lspAOqsL9TegPToZUZi221QB2AA/ZyoGx1tZ9sDnQNe2kQE7AuoRX9Vy7fd0HKdDyrSV93lxMz02tpkbxdBii1HFdg9Wy7r8OrJWBc29hvb9UQp833NIz4wFkMFtdH5HyIfJEY0QU+QBHvV63XC5nT3ztlLmxew5hEzZUYqgbIJHWDT/Mq0Ie1iCMHwaQHKTOhmE1VBKQAuibCs1MeJvMtWNHy1ykcavMhR1dg0Uw6qyhQrOZgLRbRD20tKknF4sC3sDRg9Zr6/todjXKkvU1Xzrl63OUYvrdlZgqwKqvyqQMyqQHRtMKeP7hHRrJaeZ0H7hxwFL0YP+FyejUd5QOV/VzEEEqHJaXuTDYoX2InPQ8vUa0A551Qp83CYZtt6dp31BO31vO1fRQOMw95kIZXf86vCejwg3AWC131Brym6qkh54qmWBVF9FLQpo8gMhQFyLXRGQPaTLC5PmQSnZMv5uGLEqtqwZN4AF+KQnoXFgXsPkRtxJVv8c8gJ6BmWc+GLFRCICQTs5DZLje0+dNQQFPt6jrHAzVYR0A0D/hwkE0lXfhsr6kMBhIg6bK6D4UjLRh7UMxXZgkyAQ9CWYxqDVx4S3ta5sGw8CC/xxYrVazbBZgJ/9vjCJIBzmCesaC6D1KbqKnAqPtAUgTsDE5qHarh3VHjYNsaET1EDhx8GISKqiyd2pEIFJQiQFwKPOKqlDKHgjrQCVuG6q1kkt6IGdiUPkQgpupag6rm9f1a7ZVMaYndVE3dPXg1D3Fa6VA6zcdwJOAsC6pbWrzkLIL9xRwn4yrqxRUVMg5kNvruLouKV8Fc2mo72MFIlyFkj7cMmiTcQNDNKn33KyrwRWOqeETBU896qgAd2D9+gPdQ9WsPkcyqufDzKzbqcpcHMDNHuDqQmEVvZGePrMLYImwr5GrWkIN7Q60cuwNIGUaBexe7g6Z8+Y1mpjo6HVbLZWBg74qwWqdBIpOGRggBilKCI5bGKCPUBRnkNW2MGjkIaTJacArsh6ch5U5iI6BcdAhy+wueOAelHpN6DvyQhDxhaiyN4CoZlQNnyq8XyP8cwRMDqiYhMAQ/l4a9otbgajSPiI6GAS0XWSMDKSDHMdHDrVY5J7duwusl6mBRmMGoFTjHX0d4zH1SLtpTX+FIOY8gPLJbE+VangaqogAAEgbdB7Sgsmwho1LPhygvhpXbXAd6nWV9M2CrksvorQGg2CzzKWT8D6a+j4W4gribUEVUCWm6YjjQofK3PykSutEfbvM3QpCqQASozihxm69qa5ruaH3XMyqMs6GtshcK9ACg+xGACID+HdzANVzTX2/OwCsnxlCxRq4i420FiJkVnSjeoB3CRd0DQaOKt56HZSOmSVAaS0mdb+lIAJS6YPHHde1icIZDkPqLDzUtc7dBpQKcI3qEBRjV41RS0EqaQ6s/qo+m0FEKqyZPfMJD6/bwwBuaIAnNvA1rAbGSw6MsAwEWTrqH1mLeopD0CsPywJFk9YAoDX1vADkhoXTul8aYPjMLulCr5T0IjEwQAYNvfAY4VgBkO2D1UkOTQQi/4WYWqdHQmVbcpM+x87V9b839Hy70YAXYp8xMpAOctxQG67jQSpBOD0Z13rHKQCDBiAJ7nB1M/qAb5mCSqVMCoDgmm2xhaFKepAD1mqpBIKslrVNjZe8p8/WDunnllNQbgvKN3B1zh1TQRAFHqnShIahD+3pWu2aVykHGQVLevp+fxXaLnPNlgqMtL9F5lIZFQ7ZjXCYSTK7+rlDslBlB2naVTAww9P63tIgcHNQWdUdqHhxY2rUbTTd4y2QTKGkuvn53WqMDzK6KSNxKFdu6PuIVSCNBxVJZmatCX3mdP/XMjcHmLwhFGSEVlSwx8HPrcNcldJuoPQDCmMOIMWwADmiACzCAH6vC1ZJH/jIomAdgHEQAtxPxNGzSVEbTH/BXI8C0mAXj4NRklN/y1oQdISstgHrBRp6ecDk5SAy2aVCBigSmIvrPh94ajQFVO4F8skvqv6JpfW643V17nMRiLb7ujApwEgdAgVLHYBuLDdvWvd/qn6jMTKQDnKsZCIW2SvFVhtU5TNlcJ92AOCxECOvOS9zm9K6KZJtSO2t6ib7FWwoqwC3j17WnLZu0HpGv5tO6WEBhgDL1jSatQE85m5GJdo85NBTDb1IPq3XeMIpj5e5Zz7sHJm7q6LRk3d8/P8nc8s/v0vmQpCnnwmrYdbp6jsfB2Oy04EKExDWBims1kDfkTdU47Sf0YhevqbCqwzGxgbQnX5HcT+JGJQDR1VBhx3FtsXKWgXUaqhH6sC6lOb0OSrzuga1LlRRApGqmVlrJ0R3yxTGUDHbSqlG9gdqeJbbEDpoQck8OAwWgHjvgUFDnnQEFAjl2Cm8A/xcmP8ioIkLSrqgxhBh48yFe6FwEUQdBg4ULQBYaYVsxDhEaFcBfO2qQT4Ah5pSivU4UEVEda/58HsZCGd1SmqJphzgbgLqCQ8q9KwNgOw2ALdhDZaBEqWzoypzoazu++2/horOjOqfzsr65w2GXHix7xgZSAc53GHLInuRbsXzqqASUfAgO+oVxYBkLgwl7suQf+8MwKPKaGSjuKQCI4CoQ7Wp6YxgqHWRW2TGrFZW4eCMqxJsF1RSDQEXchhEuJIRnWsDW++jwJNrQcrkM//2DZm77S593qUVVTCNgr5zp6XPOwcghFIKAMAdfb+hKcCO1HVfTYb0nTeAsiEcQPQDCESGEP5OQpVNBQz0fkON0wFwVVVcFa7uKnD9uGpwDYGkb7iDaAM0JtoP6R5vQ2VltAqK3MwCyME0YqCgViBaDIBsc0BZQlWcRUDDU7nbMhgM42BcNanMGhTIBOW/wHgk3QPVpAbEiWg0CYmNmS2BwdUG4w+q2HqAmzIfjD94tHCNqvF0amUCUqMG6zcHecZNauFTOjLu6A2GgXah1QF6BnCQaoEaFsEK3HMcbqYC79fVuSY4L7E6vHPoMOCA8dxZ1eftdQE8ti+B86iK7bc79lSxnf8Xr7foXumtfvR2+ex8XwXVlowetGAXUNsnVRCsTGjUIU9A5oYaKl1XX3dnERReD+4FGKiPhWqtcEIFbqKlQr3RBubWzYofKQB4PVpQpe+BZPaA+XZnV9eqCcST6TQpBD2QdUidNSO69nFPhVJAZb7AWxJr6nUjIOTIuXMgKpIFhuxBDjRCQg35TlsN6vpQQf2hrK6z70CpLpyPLkRTvIxqojAISGvq+4iE9BqLUF0cqeker61yig26klhsoKVP3T4UD6R0v5GOLgzUoWmW1QD08vpdF4olelCxhilEChbB72FZFxQeWBc+SAhbCtYRhXIGIkOgtw3SRghMiqnciUUhmgi8VAZYOxuA1QRR4BhkF4Y7oUUM4ZImgO4lqufGBSLbHjDzW0+LJRKAOyX70gBn50BadTAGDxIF3AdUg6NRMwsbxqUSzH0exAvMvmyjKrbf9nBSvzZnr5TDADylEkiRsgsEkBt0o7iQlkksQQRpVQXuQkSvkU9qtCMK4eWtRd14uXGdW4H2HpGGbqu5vAqRVE5P39gU5NUB2JcExt1EQhX3bVWNHMzmVWFlgaOoU1FMjgfPNgyB0ZlSAV7tQLl3Xd9lHLzowYzuDX9Z3+VUXAHjKw0VhuM5vW4Y2gUMfFVODlBKZICiogLRp2pXcUQVMGKzeY3yNVc0OlZrqGDuQQQuWtd9msrpnuzFVYnlgUjRjEk5zRScb4rbtkRZ92oXqD9WqqDIoMVJDFKrvYTuBWQwBYPBcXX/DppUWg8hgRUwaKbAoJkBC7UHBg1VsQEju20Ci7UKGh5AxubCPqLQkAPrFwWlD/ccmgYSYVMHKZjQdx4CJyKI6x7yAc9HrO0GBMRh4DfrOKp/zIOUbFLPwoAM1ipERIGqg86MCxXO3q/308gu7iOjaU/BGBlIBzna4YR5e+XD62FoLjuuCj6+S192ratVTk3o0JeI6dvN5dVqzkFj0EZHPdxWoJs2Rt7xLjVeDoNKoCJUG2yA/nHxRS0lrnX1uzkKJRc0OtHdofd3IhBvJIDKvzlUQRUDPqIQtGDpAQdNAoj7BuCRdjdB/v0uIEkEvpQWhNN9aL0BGHfzgaOkCrwqWVNyu/CYClx3txqiDoBDCxldg2kA0nsE+i5A5deYCtcliBrU/KrOAfneeAiii1rMePdn22pMeU01uirQKNedUsUzBo2Vh5OAKatpyw8HSAPb0Pi5C736Biv6jgcBhAlSVB8Pn8uBYdYGpupbQf0AZw9lpuxoiByugjIHI8KAisUgjR8j7liDaEeangNIZqFljQv4pTA4DMMspR71BoeQZgymIK0KISl/WNXP1YnXS6fwvU2oERvO6Fnwh/sXqbcVsLha8DlwzGz3PuvnB4adrvf9qfv9xGjc5zgm4Vt8r07mZSiz3LGiQtw33fCTgEeZHtODVu1ACsHV6gBnQYV9MgNVOkCV32xDGB86ym8v6+ZeLKiyLEEKMFpSoTkB4MtcGgwG1RGWgOaIlU0QkaqqwdoLqdFUn9e1L0C6JJTT561Cisgvq0ALp1SA99MAqq4Cb06gQmk8DZ3KoaR8cajGQQoUdAqEf3hFsUDhSaANAHBot6JrkAEAdedIxS8R0L/maZQv31MFXe6rkT2eBNLUJf2ci6zjRqrShnGIqAIg2+pgAMbBgA7A+y+pp+9BmsyBtj15wMZU07rPQy2IpG0FCg5ohDqs6UFMA+9WGCqkArADBhO6Fybbmg7aCcvcqwGeJwLViz01GDouNEyNqaOX3an7YxX0AIHDh0B/ODxC174IafcWNKYdUJltC/bvADgHomAwhMF4KcDn6LqA3fPnCeUOL30HOE0luC7IdyRR2pdmZj+RRSMD6SDHUpC36F6A191NPUCRoYZW43l9QSmo6upX1RIo9kDRQlphAKH4EJSGZ4A4MVZS5RZkoWdTUp/XBc+8AyzNBvib/oRG0ZY8aKlxuCqyOJBl+nFde3cKwH53qTDshXX97kjqvfQ8EPQglLyoGkPxlt5LKgKKMgKVKMv6ew5ECNyWGnXpFAglAEvefheRCOrary5CU8tJFZpRMAwWob9VaLc+bzr5S5kb87TyLjo8UuZOB8LVVk3n2lBt1UowBqkOKaw8pJza82oAzjeAGT2j5zASqPE9BEB8d1zXdRDSuVYN2PVLet1GDLBidd1bUWi63ShCz8aI7vN4XJ2wJlCEhDuall0cgvMyrfeSqeX1/lqav+lBJV8CIiUTSSJOVDm2dajvfAmifL2ynrlyXq8LDBAWQJGGxSBSB2SqFAZyoFXLoA59zaCYwDNg0KRUVxeMNajQM+BkoueAmggbQv5sMLmPnhr4ZneOeJB+6+OEiVlLJO85rBtCavlGgdK2EVfPq1cF0PKibopeTwVL1FNDKj+mxstERD39Wx0V9DM16JkFaR4IatsSVNn5LVV42ZwK3ERVDZAONEJsL6kHuQhpwS70cWpmgE0c6vEGOajuA3BjoqJCJAbVeAFET8aBuM8yuqrJBRBeUQ1hT1Z0EW4Pqvpd8AwHMZ3zIbTvAdnjIRmoSATMUD2llAixvJ6FDJTtDNqHy1w7pUrC9XQ/98vQVgTSSHEA3AJllJmZOZ4C03uO7pnchF7bjet1ktSENtD9Voa0ViLQvRCaAlwIsKB3gFojXoA0TxialPb0LLU6GuFKQBqlD6jqKET6+gYOYY40o8re1TF9jlwGQpFAGzJV0XVOhYgyQ39usQ2cbHCN1SyAqqkiNKuq2pmAdGlS13kCUq0RoGLoRDWXPAhB5wBgXh/m9brTJT04ycP1DPcW9cz04rqv2mnCZgEtzILug8o+EbgghMQTMkYG0kGOZDGwZOqeaFCpoZusk4RO9sBUXQQa9uhJ+ntF4FoatI6SuaavG7QKoNapngr62lHQdqILVTG+KsEZAKp3ChpKbnWgV09LvU9Sgkng+9gASr8TV4PQ93RNKQtyC0TW6MS4QDJZTd8ic2EwHLfBDw7uUqG0G8L9k1BdtQxkan4aQDRA0ulB9/dxoAMIQRuVRlHByYNFxbtN+YfInBvoO19e1jXoggfupQHf01FjYQqiC2OQlvoNReCAn8zMLAXLGqtALyzoQ+hRFWZXz9xdQ3jmDdBvD3BOlb6m3bNAY5DpABawDakpYDLPtiCSQ5xMgJfrd/XcRKKAkezqO6n7eq77UMFVAlDwzhgYxoATrjvqhA1T6nS2dgDIfaPurRB0VMhMHCZzg0VIl/Z1H/ie8oIlw/p+0w19uBWIcPd2aNI4A51uo1D1NeOqcz8BcrYPRKWR4haZaySgHVRX5dMCVKzVHV2Dxth6+Rl4Q+sjHfL6MSrzf4BjT5n/p791uSVT9wiJGtTHdqqa5nFq6jU3gdAvAUSCkawejAkoSU+mAUfk6v1lJzQse9N2vefdS7rxhjG1/ltQ0r+tvV3mnFU9fAVX8+o+pDg6ZRDCwMINjbsNHCDrJfXgOkBgmCqrQlgGGl5otWV5D5rkRvV5m6CkpwAHk3D1ffjgbVOGqA0YqX5T3+XEJKRpIFphkAqygQpIH0CfHSgCqgPFcAcadm6AXg1jwAnUhnYaIeAYy+d0rp7L6w2aWWPHdpmrRTR65fe2yBylTJJALhqGvRXJ6/NNAV9SqKUKpVGg9AhE4aDALAoUIVHYb7WaGnqer+drvqXP5gEXXCytirsxpzc4HgD9BKR5emAgWQA4LECZ9QOgP4kT5QhQegzVOfBXNKKaAKM/PIRWHgDwLnT1XvLLeoYbnv5eOq3P1oH94kOTxYWhnv8m4EknoWovs0EPgw+yKFHT89GoqYFEvK4r4+sNLr8/sJWP/GRU5v/bHq1W1oK9kq2FiGIi0pC3boBCKUB4PoBu75MAblyM6OfaacACACnfj/6vtkeIQbsAbwvgR2rq8UGmELsZNFIqCFoh3d1ApGuFNHB7gKmfAYoAf6g3s7uunmEbIge9lKYApwr6zivQGXuxBiSi0JQ1EwAfSUmNg0lQTlWoCKsCV5VHTZZCGv24E0SEDyXqEai2TEF60z0CSnVB4GazmqaZbKrwd2E/D6ANRR8qW8JQEr0bmH7zTfY0O1B6XXCP0A8C8/1WSKc1G/qOq+DQJFpqIE0AeeTKNGC7oFm1lfXghIAQ0QVMkxfRtQHKKfN76nCNJdXRWw6rwxAAQ0Aa+KGGE2pcTYCxXA7pXB3A0m2IWACNlyWhMjNo6FnqQml9BNLkeWjsXYsATQU4Bykghaxk9TyMAflmHAo3qCglXdSzfgSQEqcAlU5r2k5BUQ+QQo4nda3KG6C3IDCMHzm/XkZ7/b593X4in9t3jAykgxzZ2bQl947eLGyXzxSheiG3CSxfkF31VT0Yd5kqBaejlvkdN+p3O5D3T6Z1Q5Vn9LCsVnVzD/u6kUPg2SQ7+t3pDFRmgRKsgxezVAAjYqi/13ZVCIeAGDNPmGVgjE6nNCTegl5b+VVowLhdr1EDb7GZBOUEnd63F4BtGppGbgVF3oMqymAO8G6Q8uhDRCq6RVNsPWph0wYqAeAZ6sOa+kCTYH01PpqOGrvVtEYwEhDpGAPy0n4xr9c1s2xO12Z8RRVKZgwiG64qUAeiNgWDIoOyKvhfDRTIHAvUEUhCaqoEINlYVtehsaKfa04DoUxc1zoVUeegvwA91iC6G4Zu9H4RsFmwfuWhGuTZSl4/mAaZlQVc3bJWTcYbYBQP9X1EIVLXjqvsXXAh1e1DmBUqDR3AwM46et0aEJVuhp6N6SZwraV1oasD4PoD8F6uD6m4qspoB0grwxAZKgL28bC2nv/6Iev3ZJ/A4jBGBtJBjqX+jZbo37NhouDtWFgxPk3IC/tL4DmEVdhUoOdQM6rW1ZGw4aNH62ZcuE0FVbcJHtWsHvrEdmCHBUI5wGhbBLhCEhAdG9KXU1Dq7KtyivvwXYiyhDX6bZMg1C2hxmlrJwFG1ds5cpOuaS2jwnWxrUI4FlIDOFtTY9cBnqYGtD7vAyNwCJrVtnrU8RRChDsVXxEa13teAQXdhEq0yRIIdRBqRcBm9IH4qQ7vzSB9vRDRdclWIB9mZqGG7v0lYJu/c0HvewW40XJ9vXa6retFka88aI8+UMQk66poI8Cab4AByUGD0wyUXA0AZ7II0QRvVukiqLgymoFqt5iuVQq68/odoJqA6FMaCBFXBsApFtO9Wne1QKYfVfxXtALULlE1BKhjrwN7PxvR56hDA+AIRE9nIA/lDoCyAQhD61CeFoNrpMDxHgNMk9sHJyCbl7loF/rCgcMfzqijl9mnxVGvTVTxOkYG0kGOTc2upfaCcUVzKrzqDeiPA4DsQhIEMTT8CwOIJgYYi3hev+veBmWbQxWGW8JAB7AL7s8HoDWUpCeAXDBIAvEcKPNl+NiUp59rDlRAhoA/xAnpGnQSatBUliE6BtUfbag1jRyhxuQ8CM1CRQ/4OGCpWpA+PKKvtBA9qO5J1XVv7OhANdOUCr7NwEDtQfRpAJ5cYzqv19ip+34yBl45NH29E7hWSlGoxqtAk1zA6PhZPast6GnYHm6XOTOzZEQN8kG7KnO9rL68TAWUINAdLJriuHp93edRaCQ7XoDKMcDalONgfEPD6RCknL3lvN6LaQomHug7cefBScxACpbacbT0vCaS+u6GOZVjXUiJtYGnKXMLMN8DBUcUKuUyUAAQVQYTa3QghQU+ySZXz/BSAJVjYEi5RT1zMWhTFAdsVm6bOtTuIZoSc6G4puBBAQ80D+/l9Cy0Acsbi6j+caFqL2Z6Dd9ZL2c70HeSxshAOsgxlz7aEnvhYeIEZIzoyyhChVmT+paCN7wZyjbbPehXVtG5WgF4mrz/lLm0QSVQVzd8kASvDVIh/aQK+jQAN7eDYR+t6edqIKiofDcJYfedOQ0XBUDINwYVP31oTBuK6HWrveP1wiGNsgQpNQTa4G1vAVzHMmScGtv1XvIAmh+HVigJSMlEjoAqtooKzTaAPpfmdZ/m2hqtmHBUQHYj+rnHDVXxzkf0u0kg/atCZ/Ys8Posg2JLVIgS0iwAZu/ypEY2h8SMDDUG6YRu/mEHorFwLzko0rgVlGqqruvgAOFoHVoIpevAseNrmjGIQD9F6KdWBwxdJq+XWE3p+k0DGeByCNoo6c9ZtA+8aiFI1U6qIRAFgFUI8HJ9YHhOgHHaqOk5nIW2MUFGv+sArsuHticB6IZbgdqhRGmnrdAiCiggxkoqO/ItwKzFVaamgJNps6PvowLYog7gDcOgGyb2gX20Q6MU2+9kTPYiltqrGsaHkF8H0jwVIAiM5CCyUVfLvAYdBGuQmtoUA3JGoHBPO2pxp6GUuAl8P3no4zYAzEYfWOJ7kI+OQtuTLVnoVwSs3t2d8GwQcs6XoCIkqu+oloX+R5C7LySARdpVwTKEvlD1urqVcfDelyHVVQYnqLBZhUjfhf0CQrNtYIXVdW5larvMTd2lGKTCNITix3TfL7Q0ItWGNPI2T/dpoq7GSw/wJFMrKpjraQCRA15w7yrVvUcHlP5EQ/dHD0qvOyV9vnhUz8MQjAh4S1YvqZyYaEIqHtLuAyAchEyo7YbKrBJQPiThHTdCuoZDYIwfgEOY66mxNjMJsgNwjqs9VZYdUy4eG6h3ms+oIzCogtJvQVQJjDXXV9N2K0SuLauGz/JAv5uHvnVundoo6foVIJw1M60G4aFgrLWA+Cns6v2FpvIyF/GgiTcAsisALTFYv0lIS/ehZcqd8+uv2wViZRojA+kgR2qxa6nGPQqoPquCIA6QjfG0Rpr6UNGUhhB7rKweXxLSPK2oCtckWPUWAWI3MF7igJtoDyD33FdB6qQBgwCRl5ldwOcCXdzzY6oYh0kNV3egAWimqcZQsqqRtSnAKoSg83RyTt95DarE5h2NwG3J6D4I8nosdwMh2uQGsDoBC7C6DDxSBV3TMAg+H5oCZ8JbZG7hGMA5QauLJoTOwwE0eO4Cvw6wu+cmoUXEIjC+lyCC1AZrKKwVnZ04d7YsABEvgV8zYEAnAgDY1nT/NqbUuA2ApTmyApE+CPiM3a6GbGJmu8y5G/SZU6vHyFwVCkYi0G8rRHQWSTV4sw5AFKDcfg569UWAGy2c1P1bCumaVgBm4CbUgO5A651SDygzKrr3p0z3QSSqMrXeVZm/JaZ7NTpQ/FKbGkln1RjaCD2VM1mFAISBa20sC1V7AHyP9vW8xnu69i1oFBzK6v5LenrWy4BjpQxG4ZD11+hQDzcYIwPpIEcj6towdo8xEQKPxfN0U7hQ0dAHgRaA4TOEyFCoDZGSSWD1hvJ4AuIWXRWGLqRlfEjj7TY1NsCxtnEABVpUBdXmvirVlVX1OhzAVA/rVZnrAOurD255uAleKig7f6NqyiIYoseMQ2QIuJZCoLg39xXD0a6pYK4DQ/tkRw2zeQDcDgHjQ83fyePrAAbBzcH6RXQu2dZ9OoC0VK2i3x0sbZc5Ynweq6iS6ENH5hikggdhMKTMzAPMSyQJ6ULoV5YFMsU+nOESIa3hLNUP0XNdBiDuFqAxKIFM6EJkjtj/nbgaOUlTY2gIVAceEM8GoEBzkGIbwJqGZ3SzZqG1ShlK5cYh0twDwHgWANQxeEVgz1iorBGafl73dBoanvdgreqpvMxNFBRsPplVh3UMQOmRVahO6yqv1zZoAO5AVeFsDfjcwNmNQ/TJC22RuUQaBHwejEQoVAk11+u4EKTmaIwMpIMdsa6FYvcc1k5NjYM8lL12XTVyog2N5CTbGmHoTOpm3FDSQ7DQUI8g0lFlnkropu2vgqUfUuURKigupNhRoZQFXE0UGHITOd3crSTwuSyrUZcDQLEldP3C0HOIPEgr6T3nWmqoeFHo/QOtASoRfW+TpgLShyhLv6Dh/pmqvt+gDj3M0mrYdqFHzOKiKsrAID0Hdf6HApFluakWf32g6+xAe4luTHEi4BxbPwzEgpCm6ReBlLCt+77r6O9lITJhZlZIQvS0qAp02NQ9vQh99KJJPcM9SGGH4IxkltQIngypkdOa0neyCgZ5PKHnOpKD6B8Ay72Brn8YZGAOeh322mCEgcMVt7zM9Vs3ypwBR1k+qbip5k0qe/M5VdyZISDAUuowFAFpnc8pLnEOIhleRPdGEugApsHR2wR1NGMpDRe1gLdsNzRaHs/r3phydA26LcV1pqD5MvXtWRpStAgqfuH3Mu3bZG4ITben0uv3Qcsdpdh+NyOTMtsLIFkoQjojDN5OUkv/J3P6uT706gp8vUZsXAXkbA/61ADIMBsGoPUheliCJT0szZC2OIkntCppZ0efbWKgUQfPgWjbUD2b6QlVRP1+XuZWwetNAS3+bFNLdZNNNUoSJTUSV3yNTnQhwjUJoPQAyl4dTykb+lPqlbcSYOUAnmcFKhK9piq2ySw1v4QoZAUq5SDNkAfSz9ZQIxgVCIkvhNT4K0LqrFYi41QVTDYJ1T1N3VftsH7XjUIjTjML5aEEGogObahGSRQIMxNQ2ToL5D5VoCIYzoDAb+t1HVCM1JOqC/gsz9d3ArUm1piElEkfWMsh9OL0wXjJ6p7p+brOGcD9OBC6HsR07fNbIDIc0uvGm4AdravlvqOl72MRCIOLISAChWbkPdDUh49BNDaqFlK7B9jMsD7vsQn9XA8oJeJTKnd6qwD0D+keOhYifzFf31s1BlXeUJ7QWsrLXDapa797cf1+6bRHKbbfyZhORy29F86ln4dKIIgSGLToaG4GnpcOdGKG6GBsoIKlB/n8AVBa9zq6WUIQVWokVVl2O2rkgJyyQ5f1GlEo70xBmrECgOKqA0ZOQxVW0ABmWUiNOBkwuFyNYgzgxKRqmmZIAealBSXMTleNoXpMBVDK00Mfi6ggDWrQmwwwJnUgSWxCE9qUA6DKNOHdIKdQ172bh/YSceCb2gRkebuLGhGJQ4uYUAMY5QDvlkiowD0awvh3QkrRzCzb173lQ9TMz+n9RDqq4MtQ4LEDypizQLPQ8fW8JnK6ZxJ1Xf82NGDtJyi9DAoUwMMbYrqGnQT0EgS5U55SY7TXVzl2KJyHLqSrO4AFLHVO13txgNwyOE3mWmFN6UwCg/dDuvoutwN1he+CMe8CxQqU4xaSuteGId1XoQw42dCqKRVRwHgYqAQiqyrzN0AFZgaiY5S2TAIJrpOqylwVYAHQucSClMrFiX2csBbQOtAYGUgHORKplCXT9yjXMfDQAlB4INetD15WBsreq0Ds5oxDnt5TwUItDjxTQT+kJpTU2w0AlIlVNVRy8Gw3x/RzGVAScTC4ih2N7lTzGor3ptTI2QCA5xA0FA6AJXwA2JHWAE79UA0QNwbRAEqxOSoIkoDX6rd+od8FAOUY9VoFHJsH99IHVnSvq8rOa4LwciBNk4UO5LAuyVV9R8f09UGGA33nlaKmqnYPVPjH8mpULCR0/UpNrmLLAOh2R075ryYjeoh94GpJJPWMrHRgb0HMZwDRpzo1xIaowwT1HNulMms6CS1JoBFyx6nqvUDpfxQioBugk/04dAWOJvUaFBWNQ3ouC9DHblzlpwtFGrN5iNp6uo9uaoGj7FVlbjqhz1aahQKZsr7fYRNoNPp63crMsTIX62uUPwpVhSkC1/chSrWgcrs+0PexMQss96QzOyonclDlGS7pHl/19bqBrb/nDhQ60BgZSAc5Uv07LbUXhiITBrZP6DLf7VBjVSCAA6BrE8KZ7qoKrxS0OOiuqDcbAs8riEEbgD6QgbV1C4VNn2MbdD7PAd4j0wZyO8BDQdraWik4kBXNjXs5SFct6XP0IG0Ugq7pMYh2OONqSA0WyVOCypsdsH5D9Vz7vt5zGpTnAul3qHqK5nW/rBp0p1/S541DCqsAuKQuEHwGYDx7EK3oRDQt3etACwvwhA8Z03UJu1tkbucycIKloFrQzOaTKsSzHqQLqlWdA+WRB3Z9A36pIfDuZMCgtJR+d6Wje6vfBh6fmEY2o8AlFTRV7hShWqIAmI8xwCCFS/qOd0Mez/GIKFKNnBgE733oxRaB1HQGKuVicaiohd/rHg1M1dBGKd7SuUFXZa8PZzhGhFgz6phlPYUPBAPA7kF6ON3SvQEk5haiimkgEW6CvBtLq7PbqaljO+xrlDoBpZppYJVP5tY7TS1IU9MYGUgHOcq9TdbdKwTfBtI1F/g5YoBVoiaP1Trgl+JqhYeAoz8EzVG7EJ3opsHTrwMlPJCL9Qa60Tzokr5xUZ93cVJ/7zaIXKWBIGwMSAPj84qR6BZVueVAOcUAl1BzAGsTgdYPrgrNAQCP74zroe8tgpQDMHe0rgqhCJxWCSAljUAQ4hYHjGLAUiSgA3CEqopmdE3LNd0HAbQfSNSB3NJVpbMyoe8j6d8hc0Wo2ks0gGsprNGeQ5MqhO+MsYE0VdNO7Dsh4tt0NstcFvAou1xVluEsGLJLt8pcMq/7MubpzeRA5LeBgb4ADSST4Pw1AMuxHNWzfhjguOqwL3tNdQRSOej27irweKygkUPfVc/M6eiZW2mprJxvq6Hy61Vdq3RM58K79H3kAIuag/RXmByLaZ0bg2rNlU5efw86G6SnoHIUWjVVo0Be2lenKYDoWDMBZf4NfR8B8AQOE4rrDJVBN7R0TTN9qMDcR2a54NjTGBlIBzsS1XX5sglowFqDlhXteRV8GzZBWASa9lWgN1FmtwqlVoc4RYAcbxckcofgkY6psrxLHQdLDyGlEOh1d/d1cx8KZbReTsPB9boKtPCsCkiDjtc+pC0KcX02Dxz6O7vqZbl9YNf2gAL/Lr2XzpheZDivbm8JSHcGPQJVqzL2gU94Q1p/rw2lzmlgGO/3gJdmVffQBkjj9cAxsDDstRDgHDLAvFyDrunQn60NsKS+pwqhW9fvjgMGxsysH3m4zE1DmfptaX2WMrjhpVXdl30wPJ2NaiwPXa2uLFe2yFwVwg6lMT1zRejzlxvXdXj1kVoltg2yrTevaBQjB4ZFCwDK1Hsu58NFAG9YLmokzO+q8p1I6r7cBUbdkXndH1VwhvLQoX7nopbMrwCXVgZal+Qh4rPS0bPpjkNbG4AKxKN5mVuCo5lPAMxgVZ/Nh8h6AjIT1tL9t5rUdxlt6j4AyKW1AfJA5a55b/26OAOoLoARCoJAV3Q07nfU63XL5XK27T+/ZNm9AL7tQL2dDuADQqZK3+2o4ql0IYQ4AyzIHuBHBnovThXKSqFSKRXTKFUYwun1Cd3wG6FKLNRU9tpMoNKh1dU5Hzzr1TyAbqt6Ly5EwiyjB7IJaYEV03vpAZV/OgxhcuD2MPC2V3cDr0oIiM6gwizk6PvoAu+LD5V3SWBAzgLjdgW8ux4Y3uGGvo8iEEC6AEBt5CB9CBWT7aSej4B643V1/bY19Nlm63oGg3E1Plpx9iO7ZKBmANwMBmp3qM+3AFixldAv9ffCquADAEH3vV/JHHEe5ZtQpbdT8Tc9YK/3cxC5ApqKRFTPXGdS34kPXV3CdZVFQJNlESi0CAOurgvg+ngdejaWdK82++CoRKHFEVRwHgXG1WJYo51dAK9He2qULPn6zuspXdPNdcW7uWGgP4Hii1QADO1AtFnr6Tr3oHAoAQ56CPRUOFKVuQr03wsBy/1sQ4372D6xoFarY0869zVWq9UsmwVD7v+NUQTpIIdXr1t/r43aB+b46R16WHYC4eCqq4fFLeimbQIRWxs4XdyQbtopDxiooa2AA0zaO8f1YJQcCC8vqvTyQttlrkpNT33gPMlrOm18h3qu7QFQ749D2XVLFZvjE4ZGBWSiqMJrACB3ggf0AX+zAUDzBh5pP4A2FOChJQv6HBGDFMWEKoSFKtBHQDuDMPBwxUHHOkvArwUpqApUAQ6AS2s1pgqwUdA1LUBj2i1FVTBB6ziZ80Aitql038xK0DwzCdVGLWCbDwOfTnaDGiVDAJ9U26q0WmDwlmYP098DDrD6ggqtZlbfSRz4lw5bBRZ5iNp04npeCxTNBvzYEAxjbxqiT6DOwkB+Ghvq/TU36PnKL0PlKJxsF9a+uaLX/a+8Pm8N0n393+gZzkJV7BhUYsUDdWgGBeC+AsxVuKxruhoH4PsdAPSf0POVLai+iAAsIAdtd5z2Fv09aAfVBhzmInQJKMyvv5c2RHppjAykgxzL8UXrxO9RaD4IG4Oc/GQWyMWAKXh+WQ9BIqvCJqiqcA2Dx1fpQBn4kXmZ62QAML6o2yUSU+NgPqXh/ixUICyAFRE0FKczOwd8OlVt/BoGD3IZwOsbJlRgpHrQLwuMxNoCaHjqeA+B2chG3QcZCEL0odde0tMoUDhWlTkfvFkfGNCjEK3IgTDsQHXPqgu/B2X03UkgbOzrnhxrqOCLQj86F1Jds9DTMASeuttXCy6Y1AhSc/mhMpeEKi8zs8SUPl8D8DyDpmKdyoDPGDRVsKeHagxNreqzLEME2QeOrUFPlf5gi34uCf3PsrDW0BvZgh70MAPL8zCoHDXAntVdLbSIueo0PQJa9JSb+rltPTX+cmD49Kahf1wNolRFAJFP6bucqUJlcFU/l9mixsvE1rzMQaDOxvpbZK4DnRL6UMzRiGvE5y5of+NGgb16CXredfUMr4BOytysRtPSEjgB0P4mkVdZOV5Vx3E5v16Pdr0Rk/bvZHT9rEX8ewRHEuAKK6bCplbWDVULQ6gW2FL7tarMxat6WrrA5hr2oBXKqkZKkrdCchyq8Vpj6u1EG1qJUgvr4SsAoH1X80idi6p3kgTulmEXctlQ0VDZrgovlNWUhz+A6A4YAlB8aC2wQEIVkGiAGXB3Aii4pIKgWwEPDap2jFoDANV+K6Zr1fP1/iahgW0XcGe7I/rdaWDr9QbQTwneb2Goz9sEr9KFCtFaXtdgBarGcjkFQDvEpGhmt3V07zc7EE0I6TpEQVEkk6oYIy19lgqk8WwGck7QOqIXBzwKcN3kXV2b9OFqGLcDSDnPQXUlFDysALbIEkA/sQpwhEFV5nbMqfFX1tuz4YreS62nH3TG1NHLxKBK+TaVd15Ur5HPqNxJbajK3CFtWHsQHQPoxtCBvmZ+RSNmTgqi2d4WmStAZbCVjpapnqmMjgDOZ2cAXRZSKkCdeSK81OdtAm3Fti7Ip9B6B7gP8oXGyEA6yBHrDy3ev+cF10PA7eOqIF0AJtOeQcuFXep9Bj5wusxAugravbuBfne8rF6WCzin2yLQqwvwAU1P12BrSO9lWwi8eleBjKmOAnE9WGeisa9B1+oukFDlAHhobl6mIoAjAviNzQATbB14WkLAdu6ltbS2UIa2LAN9b00oX+3GASMFEYKxQB9kPK57chlSt4O2fvfwoc6t9oASIaPG+FwN6C0AOJyNANgSlNPinYDSBmqCKGBH0hmNVpqZucBSRyXQYU8N934HRG9Yz5IfggqkIqTxgGo5sUHf8daQKv1GSqNmAzjrdQBQJ9sqd3qH6VlKQOXjypLu86nlqsy5JThL0MV9N2AG3Qq0WwKFnCio3EmG9f46HX3nhbTuIx+KTWoVXYMjoMVRCUh6y4ATW2nqunQimg5OAFM1ZGltFXBOUUejbUWQ73PAXu8B/MKpAmYNshWb4RxCENgiVf3cAHB23j5dFtrDjv2j/pyMkYF0kKOXSJuzF9I/BBbyKng2uQgot7gqhUhOcQRJKA2tblPl4aU1FVcDHEwb+Co8MLC7BAQfqlcUBbDf7eCINHzIsd2iH2wdrmm3EpSYDUFZOn39bn+onshqUYVcZ1XXpdSDijUAm5f9qsxFQJCWoQqjAyHipZpG4NJpFcIRqGbqJ9WDbHX0GisAcv9xSRWHG9Z032RW39tCSveGs0sFLmFHcjW9l4ktusddKGIIx3RNS7MaIZwGL7K1qvfcjaiSNTMbo6gNFBks7VZtdCswLUeJcX9Cn6Xu6b4sghGciqqiaA7U+y/CMaxDiigEveLmA12v2Y7ujyaUSMUc/W4dehhaR881iEobepraa4PidiFV2APtC6Tolqzrc3Sbeq5zBd0bxxyVlznoDGKVnhrKK4sAAciobHOgefgA+ilGoXo2VgNnEoDWjqN77Vj/CJnbBWm83IQacF3o09mAwiFbUUfZBTnRgqh8I7f+nXeAuJTGyEA6yNGsBRYM93oh2bx8proRWjjMQeRlqJa569+i34U8uAf9cQIoP19MqAItldRg6ELPrCaknLKmwqYGTNWRATTnhOaDgM2z3rJ6HfWCVuikVkGZqI1oK1Fd5xpUkyVqQDy5Qa8BnVCsC8zXwRywUoNnvSGkBnV6CB4VsGUW4yfIXC6hQu7QHFBAtPT+5gZ63Sh4hlkAlhehs33rKBWaCSCtHIN17mdVgHeKKkjjgOVrQUuHRl33VRGMpgXYz2Zmbl3PUgo4bPLQN3AaijR2btM9U2+rgZrYoBGfUFsVTxiaS6cmQXa09TyML6kDZ2ngX0urwRsADURKbXQbApuzk1eFNwBW5SqwV/tJfY4IpArj0AOyC1GMmAdptww4okeocVUAjFQDouPtNjR+Bcc2BPjAaEf3ZQNIa6OQpk0CMa4f0fUbGPTzdFRmjYX0bDrQ1DYEUUNnQjdHCCg9uhXVF/6h+nsBkNumMusNzBCRw8EYGUgHObqJkIX3AtW2ysABBKn2IAXlwHXF3/THdVPcBBCVwaoejNQKhDhNCdZun/8PmevAwajHgPm6r4os3aWSTzWkdgw0XTXdAEr9tB6gXkdThW5EFVazqkKuBxVrvVX9bgSYiH1H52YnVGBkAKDoQRTDgIQwu6rCwZ0EBm/ItQ+hnDoMzWW7cV3TsAedu/tqMFT7UJWVVaOuultD9jMA1s9AafIyRBebAIY/FKIBfehzOExUZa5oUOwArN5ZqAI0M4sAxicVheqqOKRHTb3/mY0q7LPL0Lx19/+ROT+qxlA9qen53G4gTqwB+ek0vM8hFFo0wUhvAF1JAag/FqDNRlkVV6IBRKxAjDtoQouOmJ7rWFsjTVGobK3nNb25YVoNrs3QhywP1XND03Nze1v3xqwLdAoLus5Lm/ReStCc15mGSmjok+ZXIOWc0PtrQ/TuhrKucxyigZGQ/t5CCowraCuU3ATR9gSkuSHqddc+OMxuG1IaMEYG0kGO1HDFUnv1PAogfVM1qBzZqUph4RA4pDXgVarp7yWH22VuF0QYuj3dyEUoZw8VIKoEuecB8K9UwioI+hEV/qGyVkPUHaoI080c6QFgtKkCI7Soh94BwrHIEIySsn4XZIh1AZm/Ja3GRgRKUp2Mfq5pinlxYZ0DB5iqAeOzZJoiSgxUOWVcVWwueNEzYLSHHYiORdQAWayo0US4PR+a3waQ3tgNfWTbCU1HpDoa6Qh1gYC0BVVFAeRazKwbUeM2Aq26iqb3EwBhZr2hzNxz0OB4UFc6kKXdEEnLqJE5hMrMDhBFAubWehFdryKokE5RFVQG2nFUcxpR2bldn3cspfIpC21KklD23h6qU5dsQRsVWIOJQL971O163ToAhYdVMDDHVE5MALO8X9H35sfVaIqvakSvl4J3Drx0XqB71xnXfR70gSJgWb8bnoWquCV4H9AYsgsQj1QbqqgdPezREjT7ntcUm9Nav3cj7f0jihwZSAc5ftBfsPhe5cdDCIUisZ4PlRRVnSuuAN8PVActZjQVF4cokEEYteGrAI/FVBA0IHKwC1I/42AQtqDB7rCoz5ELgK6gqZs5AkbYyoqGtRMOGJOQ3g4B0VmQ1bRFv6saMFeGvkZjuvaVFYg0VYCILQv9z5p60yvgkYZA11GvKGvruqxCRC8bVsHXbEDTSMBhGKSMgxwo1LoaZr0wNNMEIsvmMuQ3oeFppXiTzFV7elY3AI/RUg86S5tZqqD7cmIfMKiZ2YoHVtP34ZmTuq7FDnCejeu65jcRhk6N/npYjZIYEPBB7YAtAHNzKqpRqkxI17WZ1mdLdFThOXmVRc2EKuQVIiGFM5xY0c81E9D7r6VRpWMCfb+VNNABdNUxGxQhKgItp0Idvb9OSuVYH4pN3ECd3STcS7SlEVUomLYKVLGuBoB9BCxgZwjOKaSmianeHdN9teqA82zQuuRGPeue6fnYWV0fcey1R2X+v5NxiJe25F4GUhNKuSPQJsItAZszlEM1c3oI8l0ox65BnyoX2Jd3qVDq5MDbgc7iw6ZuKmJaHWyXKQsXVZnXoVy85kDkIKwAwMacHtwBtEcBp83M1wO+eUqfrZPQqEMaOmj/pgSUCHeoUqVqwe4mjVhsLwNeKwGNLsf1OQZAFLfg6XPUoGv9LCjAFRe8sZ4qWUsCWHpVlYkzDwYXcJlE5nRftWO6povQ3Hh2Wj/X3A0RLiBIbYRBmQRcxVZvKj/PDsB+RuqKl5sCUHW7DtVuUA1JlU9N6KVySxpIIVcBI1XeLnObYR/FfYgMR9WwaNXUmJ/YDko1Cfu3rZHNNNCaVIZ5mXMg3VIu6nMAfMks+kuZmneBAyyi0btIDVLnzZv1ujNAu1LX/Zte1b3h5lSvtCH9HR9o6M+BlNgqUMB0gNxyrKnnoZOGtDZ0aFiF5tdxSGG3OyoXoy193sGKyqyVvr6PJjDpb03k1/2/Q9WvMEYG0kGO7u6OhZL3KJFWSptnxhb10HdqQKyXVQE5po609TuqoApQ3p2CNE8H+Egy0DE8t0tDl/0YlPRCyXf3KJmyRRDM01DtVgWMT3eozzZBQg6iSpEOdI9v6VytrIZADCoSFxw99KlVFcK3FPSdtzeoRz8NwYVsVwXkSlz3xjxQJ+T6AMIdKN6luwy8ReGqzAU6ZcEQ8B/AitxNAViyDPQMDWBeh/NBxHjT0F19CZi06+2qzPU1Q2HRDKTTapDHM7OxuBqADSjpn3TVENgBvGUO4Gp6QA3RngPmdqia8iANNV7Qd7Izpt5/ZAnKtruwDmFo69CtytwA0tpxICmqdSFl72kUPeGp4xiM6/tIdfQaYwS6H1f8ZwTsHg+A/b0YRP6BJLUSB3qBef1uENMomgc8Y66j1/AiuqmHvlrt2V3UABiavIaB+LgCEek0VKw2gdID9E+3rWfOg2hRAN3cw4A3bPt63fI+xL1d6H5AY2QgHeSo1BvW2asmvrJDJUEaWHOHtPLLejCqDRUOkFK2LPQDqwGdfLaum7EOhpkBK+0ydMaOAX9QD8pAw1N6MOahDDRShzC+B5U8ACjeDdGiDdNQPgMBnwXAeqX7KvzjMQg7LOsLaQDWxoh4crce+gBaK4yBNwvMPkixUPf12YZ9VSZBAYwwCNl70NTWW1LlmVxWL9qHXnaxXT+TuSWonvOjKtRWW3o+PKisdEFBZyuwglFVJp1lblZ7E5QYxwCY2sloCjYOHrwD4LhoR595OapGzhCiCYllwNqsbJG5KSizjhd1HZYgtW8dNXibTeDTgjLwKjQpHc4Cke0K8blB5CVUlbkYEHd2c2DQuOqE9W/VdfFmIN0K/EG7loASBYoqspvUeWmYepgN4JtKwhoU+2oU91uqfxLQKHjCdO1bwOvVA4qVJjxbpwVVyi2N/KcmoGLaA2MXcKcrIJ+G8Iqa+6Svex5Xpu47fq8G0qWXXmqXXXbZurkjjzzSbr5ZBeue8cEPftCuvvpq27Fjh5VKJTvvvPPsXe96l8Xjdwuxd73rXfaFL3zBbr75ZkskEnbqqafa5ZdfbkceeY+HsLCwYK9//evt+uuvt0ajYUceeaS95S1vsT/+4z8+4GfY3dth0b2iKKG+GhEBebSg8Cx5gkxFoTQ0GVZvZ576ge3UTTYTAQ4VkHsNYIKOAVapGFNhXZ0HwQd8LsUicI/A80bAc+hq1sgSHcA+QAXMAsBlgigwkYPDvAFwXbW0Gk0lT7Ejt3jq3W2DhqehPnjH4PUWJxT4Pt1TgQEtlqwDBpILKVlIptlhgIda7EPZbA4wa54KyBAw5A7TGjpPQhuPGDCCp9v6e7053ae7E9DHDVJVeWCfNzNrQsPkTqDr34XKNug+ZGFITfUc3Zc5YK8uQdq9BU2y6yloNVTXvRXqaeRgAoyhX29SedeDqq4u9PlLQPQ5tUu1WwK60Tt6WWtE9RwSfGACGqFWwTErZTX1E2rpOrcjUP3VBYzUEDIJ4Gx0gd8oBtVzaUhDUXppoakO12RV73kR+u/1E/qOcj09w8Gyvl+3p4qlAP38auCwhiA61gKaiQj0/YvBec021q99F1ra0Pi9R5COPfZY+/a3v732fwcqc/aMT3/60/bGN77RPvrRj9qpp55qt956q1100UUWCoXs/e9/v5mZ/fu//7u9/OUvt4c//OE2GAzszW9+sz3hCU+w3/zmN5ZK3S1YLrzwQqtWq/blL3/ZSqWSffrTn7bzzz/ffvrTn9qJJ554QPffjW8yf29Ok4YKoOq4HnoP3k8W5iJQFj3XgGoIaIMdzushqAGTaRKI4lrAoJoCsORdQEaXgYoQqjrztkPOJA48I0BGF4EoQairB7LcVWOjvqq4ifGstguIpPVd3hpoCjUJ+Juqr4R8FDXodnQuvqhGXRaY19sgWO7cqcI1O5aXuSgAMiea0CetpXNloLJw2lDdA3w43Qld04ipMvaBKqKd13UhRyOSUkXk5VVhedTGA9gBoxuAV8PM4hCZ67Shh5mnv9nP6LrmwMgJQBx6AIIuw+di00CpUNP1ym2AtHZIlcx4Tn/v6HlV3NUQNGXNV2XOXQInMa1zgyw4ifP6uSQwoydBfi5VdH9kS7pXf1XV25vdAud1SeVxAGD93GH6gxPqc1p9VY35/rh+t90F0sW24tPSEV0rfxZAynU1NrLABdesAVs3sI6noBqXXI0G9MGMwr5qkXHv6NoPerp+uyrr32+/8wcC0nYcx6amIM8J44c//KGddtpp9qd/+qdmZrZlyxa74IIL7IYbblj7zDe/+c113/nYxz5mExMT9rOf/czOOOOMtd+5+uqr7RGPeISZmb31rW+1D3zgA/azn/3sgA2kuFO32F7WrpfQUHIoul2/ByH/xrJWhHQi+sHclAqMOpQcx6CbemerKor5eeAjgpRTewB54ahu+ST1COtCD6NxwBZA37BmUg/kUk8PfQ+Ukxt/mMxlgKyw1fmBzA0h154sqeE4jKhgWYXGoxCFtmxU30cmoQqmAZIlDKm4UB7IGTv6e842FSw3FXXuIVHgSwFweHZRjYBeBlJYgBmaPQQavEJDzHIS9ktEDWUPSoSBv9HGj9Bnizf199pdAKOYWTgMbNOQroqUdX+kgfupX1Sl4AIVQSYC0T/ASTlAnLoQgkIBaNvRyUKl4k16vkAf2xKAeCMGXEsQBepBxeVwRfVDYhWA+BA97fXBuYpDo2DoRzyVgubSS/p+c3l1/pZUlFtnWZ9tBboiBIDD6oDs9TO6BmEgOk1C9K4DUa+ZrhpD2wNQVACW7kNmogNVmbGd4ECMqUHTnlInttBWZ7c6ACAhsI4n94lmR8jzgPF7N5Buu+02m5mZsXg8bo961KPsXe96l23axNT+p556qn3yk5+0H//4x/aIRzzC7rzzTvv6179uz3ve8+7192u1u9M9xeI9J/nUU0+1a6+91s455xzL5/P22c9+1rrdrp111ln3+ju9Xs96e5X71v9faXJxsW/xvfTKXEQjDAPgXMhkdOn7s5pWWJgDzhlgue4EQCXg6SHY2AYOi4QeoNsnoKprCFUTrqbJdoZUcUxAWWW3r6nUQQ9IA1uqfGt59U56EE1waiqpGlD6u3Ec+osBsWO7roq7D15vIa+4hHZbf28Q03TkzpIqGB/aLaQ8FZC9qgrX8tx2mcsAdcKRzjF6fxMgqLoaYieiyChwX20IVKPGHAAnJ9RQPnwIjkGgUUMfQLOTNb1Gc17vr0wkrNBXy8zMDeve9/qqLJeB5qMb0e+OQaq7B4YA2X+JVd1HfhIqW6GL+RAir9Fl4NMilmbo6ZfqVGVuENZrDCf194YrKvvDpmvQmYFGwQFgLoHmow9ysZZSedyKHS5zNlDl2weerPBhahQPAogCR9Qo6SdUxmTa+t1GA2hSkkCWCwZ+CTA4q6tqPKcHapyGNoDBCv0tu0t6FuppaLDr6Zp2gWy019DzavGq3t9RsK/2cfT64MDSCAUBSJjf0fjGN75hzWbTjjzySJufn7fLLrvMdu/ebTfeeKNlqJrEzP72b//WXve611kQBDYYDOylL32pXX311fhZ3/ftaU97mlWrVfuP/7iHLbpardqf/Mmf2HXXXWeO41gymbTPfe5z9oQnPOFe75XwUmZmr3v72RaL33O4Yq56d3XoztyARp49KHtNxPXghoBtugnAtAYIljw1OAQMajwCKQBg0i0DX1Ie+uiEPDUiWgkQIuDJ1aoq/GOAHgxDvjwAjFQIWnlETa/Ry+hzOJAqAOiTtaEyKwX8PB4YdWHA1VQhJRaB3l1lwCDElH/QDnHyMrfhUH2X0DPSenO6YULATh5kVAiXgp/LXNYUU7cLKvSywKG0AhGbRB4iHZA+jG4CQHBZDbhoOC9zZmaZSb2f/CpUBwEZqBPoe3cbVZkLgAKBwL7FCWAtBubxMEQEu9AqvttWA27WUcO4kldlOVyASqWCRiLiMSgXb+tzjGE5AoCMoTq1OQ7Cbb6q15jUM5fx1eAKd3RNG4Dd64f0c/mmchk1ATcVikEBBVSE3TlUJ3EpBH3IFlTeRTbqnoz1gNohDZihmK6pC/2WmkC+6wHPXQfoO0IAoegvA2kt6DjgarZ2sN646rU6dsXTXm+1Ws2yWdXPa/fx+zSQ9h3VatU2b95s73//++2FL3yh/P273/2uPfvZz7a3v/3t9shHPtJuv/12e/WrX20vetGL7G1ve5t8/uKLL7ZvfOMb9h//8R82O3tPeP6Vr3yl/fjHP7Z3vvOdViqV7F//9V/tAx/4gH3/+9+3hzzkIXhvFEHauHGjXfTmsy26V+fENjCeJomIIqKCtL8CxhV4w1moQEhAb506lO9OjoESBOOq7QFfUhPcawCq+mAwhAHc6PnqsVQhxVaHRpKRgmr9lq8HKA2h5HRJlY6zrICeCuSyq/E7Zc6AUK400H2QGICh7OdlLhvTqNcKpcxDGt3pQxf2eFWF9RQERepgNCXHIRoI+ft4Xw3MVl2VZ7ig1+hOV2VuwwpUpzQVXzWE8rR8Gig0xjVV5YIxWQfm6tgktBE3s619TacHsIZRIDVMQ8TXC4PBllLx3IUOp6Fl3eeZAkRP6ionJgb6e2MbAEvpKa6uCxGkeAxSMENVyL2B3kvVAHRb0nPoJPU9bQSDprNLPa4skLjWgVSzH+jnogacPVUAWpegyzxEQOMgnyq+yt5mVd9Rra4ya7Wh57+d0fdRgKKZFDR+duLAM5TUCFcirHutAqlWi+VlilLi1H8TOGutmdY1zQFFiBddn4rrtbv2N3/ypj8sA8nM7OEPf7idffbZ9q53vUv+dvrpp9spp5xi733ve9fmPvnJT9qLX/xiazab6zABr3jFK+xLX/qSfe9737OtW+8B4N5xxx122GGH2Y033mjHHnvs2vzZZ59thx12mH3oQx/ar/us1+uWy+XsCS8+0dzoPZt8ANT28ZB6pX2I8LWTUAmUgioWyDPHoPpmAB3qY31N/ZSzmsfN9lUJFg2ECCC8a+RF94FIjEi+oLwnAjiCELSi6OZ1DSaXFL/UCoGHBv2PhnVd+xpElSI9fefuzK16f9B3KdvQ7+YaeuhXgWZ/I1SqVgDpH4fzH43rl12ITPTnoMM3pHgjwN0UiwCOBdjJc5HNMpfpQAPLrp6PHKSCVym1BP3Gei4Y1FE1vCtRwGGY2UxSDdRNwBUWgfXPAslOz4Oosq/r5cbUONga0n1UhurAELQVcgt6HuJQXRVE1aDpJvRcR0zBRa26GhYOpGAtp7LIaUCUIKTGcg+IshwAGWepNyYYdT3AYXaHgP+CdjxNwO7USuA5Alg/uQS0Bqv63UVIL8UXdW7B1d9zCrr/IAhs4ax+dwMwrzeAc8+aepagh7f5YdUDi1C16wK5o18FaAkUGAxy6y/ca/fsby644n4NpN87Bmnv0Ww27Y477rhXTFG73RZgZOT/WeB77LwgCOyVr3ylffGLX7Tvfve764yjPb9hpgDLSCRivg+kD/czWrNHmBO/R7CFAGPhDvRQFaGDbSYPCiqpLzuAbuXDCHSobqpwrRPOoaYGSKKjc7cBx0sXaIZi0BNpCVo4xKvqiXShlHMI7Ku7gSV4CuzL26BPWhUA7YmGrl9uQp+j01UDLpvTUx+BCql4Tecc6KG3AEosCoDn2wtgAANwuAbG3wCUp5dRjzQzpgKyAFED6t1XBSxa3FWjImgB91AbeKkg9dCch47rBUinAaYpAkq7WYLz27xd5szMyr7uhQGQM2brujYVMuKyagSHgW3eddVo7UHviNBmiO609JmXanrmtgITayyizlUM2gpRsDMb0jXop3RvVT11LKJ9iJSAc5WBnl4BpN1bsLe8OQB2JTUd1AO0vxPAOczqNUrQVqgF9AJuAP3KoP4rCU7nLjCG6oBzinYA4F3U8wVsJTZHjfqgWCKX0rMZgrPeDfT99sFa6xDB0UZd5yQYa/syf/ShKwSN36uB9LrXvc7OPfdc27x5s83Nzdkll1xikUjELrjgAjO7uxx/w4YNa9Gkc889197//vfbiSeeuJZie9vb3mbnnnvumqH08pe/3D796U/bl770JctkMrawcLdAyOVylkgk7KijjrLDDjvMXvKSl9j73vc+Gxsbs3/913+166+/3r761a8e8DNEwzFz9hJO/QkVQINV6HETAoENRngYSqoTPnQRB86TXnuLzAFjvfWzAGB1wKCpA2s29HZbBMxVKQ1l/iVIzw30XuLAR5Loq6LtQOpnYw6I2OrAmusCi3lH35EXUolR8dXQS7rQ2qIIQNqWCpFNE1CJAjiM9ECvcQtUysXAMMs19V7cAvDwxKFEGDzNsTFIV0G1S3hV328MhH/qMH3eQyNqGLTBqJha1bOQAQLIyCHAjbSo76Pe1euamfX9qsx121ppN8hpWvYwiK4ll/UeyyXAG0FvLWCGsP6c/t5sSs/mTE73tAfVfIUh9ESE7vH1GUjPhcFQgaiIOwSlGoG0J/BpDV0F+w6h7+Igo+8oB58bJjVS0ilWZa7Z1HvxgQfJC8BpigIhIvHmNaEtE3DWdkEPRF2dc2G/dGkNTNc+HVf5NOzeKHP9hK5zNAXcJADijGa1jU8cMiLxqDrybSCPnPDXR/S6PjD1w/i9Gki7du2yCy64wFZWVmx8fNwe/ehH2//9v//XxsfvFlw7duxYF+l561vfaqFQyN761rfa7t27bXx83M4991x7xzvesfaZPYDtfSvSrrnmGrvooovMdV37+te/bm984xvt3HPPtWazaYcddpj98z//sz3lKU854GfwEj0LEveEZaINFezxkoZbB3nq5q2fm43qi1yBqMgibO5mSUOcG6DTttfV6zqAkXDntEJvc0+/m4K91wHWxRCAleLZqswFkI/s+1qt4QLp4m1h9QzHhsrcDNQyNmeqGMcAo7KxBcD3sCrfYkg/F0tASC+he6ixW583BI3mjgK27mhKBcvJGzS9se32w2RuAWjHhymt7nFXFL9wAqxVJAEeHzR4XnUgzQAGaxz4khaSENWYAIbhZQg5RoAFusSsuw6kBgNIEZfBQN0+r3MbtqpxcBj0Z4z0gGfMVQGwAJiNamu7zE1CAUWfZAx0iXHiwDZtIIzgGhMRNWhC0N7HD+neGgIZqFH6u69zQ0i7L+f0utCX1iZrajRVk0r9sVIDclBogZEDTruaA2F5eB/pRb3GnQAfmIE2JW1IsXcg0+S2dI+7WVhTiPy3gFC2AYUbmUmVCZGens3UCkTvANMUikOE29bLzzB0caDx3w6D9Icy9mCQHv32565LsXVMDRAnqWH7INCoQ5t65qyq0nInwTNfVkMlAY03m13Ft7R8YG5NQhQjpdeoQDjBAYJFFwDKwxXgS3E1WrQdwtBh4JEBR8naLRWGxZiWEg9T6olEgSp/qgKpjECNjXHgFIoG+hyVJqRfoUKnOqXuYhUO+Rikbje1QPJN6DUiEMFOD4CUFAgv4ZZtN0S4whCZdIEZugd9q8aBbDSe1mvMhfVzKQC++sCFkgUDqQ9RDTOzBGBQ41DxEAUA8EoByrEhDZWG8u5CXe8xPqkR1SSsVy8OhFoD3Uervt5zZACePtCGdKAdjxeryhzoMesa8Jul9T2lDVrgwA/GfZXHfcBIBVGNPvehKMWLqyHgtkGFQsopDHZ2BIp13L6uc6OpRv8ypdj6Kt+XG/oc1MQ7Cw3Ku0BDMuEqHUi7QA2dVe/1oFKuUKQqIf1uGNLXua5i77Kz+nClfVowtZs9+5MzLv/DwiD9IY6Mt2DuXuHyMAhNFxp5UhuG6Ap40gPd8NO/0J/zIdy/HaI22ZgetCEYPt0hdDUf6gaNhIA8sqmbe+ipECYFP4AUxwxcI5MCDA2Elwd9ZchuAx4lCo1uB0Y4MTXMDt2iHmSup0ZYktrBAG/WDeAZ5iFK5YAyyaagzQu0W+hDGa2TysucldR7j0GrhkJf5zZWdB90Y6AA8xpNmQAn2iDd1IHS+K1xwLsAHmqQAOwTdCVPJoHR0MyGUPYea0KD4816lrLDR8hcCdA7N3qK92hCVZwDxksJ3nuzD5imqD6fD9HdIASl12PqgPRa6mwMXU1rFwAAPN4HriAwvnvAARbpqPEScjSSm4PWGxUg6QxnAY8GGD8XaBKKADJe7Gj0yfP0XmKAZK7HVXaEoUF5vw4tP+AMpwNqlg7RSkevWx3o4Rz2NNpeCuuzxU1lZQwMwnQcGt0O9P42Q8p4Ka/3vG+DjgihxWGMDKSDHI2pMXP26ukUDqlB0xuoAgA5bBvAg2y6uhlvK6jALdSh0qapkiUFhkq3D60G+rrhQ3U4kFn9XBSqDWrgPbU36b3MeIDX6urBGAL2obKs27k0C6FzqHbp36rCcDqtaz97yLEy14DGlOWwlur3gSyvMweKG1KyQ/DuJgFg0c1oNMsHTisCvjoAuA2WFaDcbmvYvT+r65fK6SaPBRD5W1YB7gJBrvX0GtRHNgWcCOkN+o68gRofngukjlGNapqZRUw98+0lPQ9piE5EgMPGoKR/I9DBVfL6jlsRVUYZaNsTBuqFsYRGMVdCamg3wJCN9qFSsayGFFTM2yLg5Sppve74ip6HUBzkTpUIyXRdSjnAL0LXQa8Kkfq8KuQ2zO2u63Okq3p7fcCTLkxCugqqYZZ+owa6TQHfFBgW1UB1QxGqRKMpNWhagEVzIQbQT6lxGp7I6xyQeUIxqE1FdV+VKVXYA307XL/O7d7+EUWODKSDHIVuxdy9mi4OIBQ6BCKxBhzc29Jg1ZLna8C4DVVYBqmuHnjXDrWEaOlmrPgqrSugVIFU2QZpVchRuOXbo3oySjmoXoJ8fr4AxlVbheZjjz5Z5jYdroZPd/gbmXNM7yUCPDIVOFl3AZu4bValPw6GFNC+mFcA8HBE168fAbwWNAAtA34ym1OgZWYrtGCJ52UuDFQMUztVgFehdYa/oHutBbw+GzMQpUrq+9hhVZlLdFVRbh6DEDBEK8zMfGgD1BnTe+wNVYFuBaLIbWAo2hbdSCfENGqzq6zKaKGqc/ECFGS4ekYmoX9XsQ6NWsHwcSeh5yB1fq7o/hhCBdcKAJkrAIKOAU3FhryuaR04u6JRXSsDMHICIkNdYOnNASVKAEtQBwb/9l3g/IEhVZvUszkNEI9QQYUH4WomSnmZOzSsEf2KaVrrrmVoIZJUDGwBqgBTgDsbJ24SR437/JSu/ZYe0Fa46x3W5r2cabnkfn1qNO51NDu+OXu5RzHYej5s2jhgd+aX9WD4xJANfciiG/X0xUGAV8FDmx2qsebHNOze7eghzUO1VhZKQtodSGHlIE8P9xeJqUdVNKCin1cBvgE6hld+CZVttZv096CjvAeNS8dyei+rYCgXITw/m9H1a9R1XXqQp29Ar7gO9GeaGB4ncw6QjT5iGgC3AxWG9dWqzEWPVK8yDmSZ1bRGbSIR4G6aoa7uuqa1ZX2/+ZzugwT00MpCFdA4VGo2whCZMENQyfgKeP/AW9R2IXU+pUo6vaQK/i5gtD+0qHJnHNpJ7IS2Kb1FVSjLAKZtQ8skM/1cCMreHeBuS0BbjApADyBTY7GoXjcI63PcDl0HCsCuH+4okWhrFQggU2qcNi0vc9sAgG6QDah5+i5LGZ2L1ICctabXWAY8TTqkciwBDvruOzVSuqsNVYUxfW8Rwp2Z4mdbnuqGh0Mr1gEUJwyh6tnR12YepM53u+shD+0/hDL//wmjd/O8DfciikxAv6IhHOblRcDBbNHvZosqlGbBABlANQk40taLqNcxbGk6qF5SYdMCYTM5r8qj2obQEDCCTwN3SwFo8ePQDLZTUEzDoVFox+Hr+i3HlGuFWBczgP+KVvWrq8Be7RTUsChCL7atMypY7kiqAM/EVWiGEmpYHE2stL4KwzTww2QmVSFUWhpF60H0IwZcOgYGYRLwzq0CEO1Byx4XUgDFNNBRQJuHxQREU6AlzjLQBmSA7dzMbADVWiXgpuoDAaTnafTKddXI6YHBu7GhSjAeBycHvP9MFwxPV6NwWWhZ0QV+mUERjFtY1xIwo3tRIOqEyrEycHFNBsD309ffo4heDPp8JSE6kYNm0NZTgHIvDIURBsUcETWuFoF8cxoOySQYorfB+wjlQPZ21ClpQPVnKKMyaxqyH/lp/a7fhvAYVPKm0kAoCw27HXCaUsCK3irp5+rgIE3W1r+j1n7Wpo0MpIMcy/WIRfaKojSgd00nAc1Mj1McQR+YmzcCoNjpa2SoDLwOwxiUzAJB3SCh7VWcuOZb8kM1pKKb1eAqpaBseDv0bNupQikAsLQPfbSmgTtnAKzjC9CrJz2EVg1ARjmcgVx7DA4WAP5SbX3esQFwoxDTJoCgZzPKCzIJIPJgUgVQAqo/MtCPLgP96ObaEPmDppZWVOGVh3vOp6GCaLd6wmMAzHWhxUYHegH2NmjU5ZiahvtrkB426B8VBfJCM7NiVve+BwZ+LgcszRA59CFNFjECBetZX+4CBgwIPXNJiCrXVSZMwTUmQcasevq8oFMtBOzGgaO/lyhUZe6IkK5LAyqzJoCOYQ7wmmlodJusqwFX6UDRh6v350KlZw7SRlFH1z5d1bn6AIwcWNOtgP8chvT3GpCy36jBHUtCheOgTL37gMMrk5e5Sl8PcSGu73zOA1JSqCrOJ0DeURQYmhb3cvu8X+DWojEykA5yOLmURfbyAhqQyh6D/HECQhGJLeoRAAbVBlFN2W2sgkcAvdjC0DdsqaKCtA6KsTANjSkBM1SuqjeRdjTC0AT278SsliYPKnrAt0Gz31xPjY1kjzhsADQLhmgSwrxRqP5IQUXYEQAejADfxyCkhvKhRV1Tr6bPVgrrc3hTYDAAtsgFo6kBcwEAkWMhvT8/qt5nDNJIzRUVOeNUUZeXKQM9btZRr3zDgqYA0sAmHk3q+WgCwLuQY1K5iKe/2czpeiV9ADevwjMntsscBEXMA8WdSW/Re0lD4UEWSBchsrHaV4My56hCzkCqtjDU/dECTEnBVYerC1w3Q3B8plwgrZzUsxQeajUp8ElaJqbv3cnmZW7QBaZ62Jd+U+VdG7osRGN6OGOkljfr+rlRlYvtKmwYKBKIhqFAAaJoyTHVDeMQoV2BgpZYA/B4vbzMTQNo1YXCITcHfHhd/Zwf1X06aK+HVQwg/UxjZCAd5JjZ4psTv+cFOykVkNGyGhvlqJo+RQjZ14EiwAFQ4GBCFVQC3m5qAUghgTq+UFAB1AU8RHhCnzceVYE2DiHT2BF6g7dCC4wViCZkEkAGWFGP3ilAk9KwroHbg/fWgzYAQMSYhXvunAQePTQUvbOqVWIz43p/kwBetTyQedb1Gm1gbc+2ofu2D7wlC6DIwZ2dBvbagQNcS03op9TUfRWCVHUUjJfSBFRMVaGqaKCGdwG4ljZA2jIKncrNzMqAoYl2gOQTyo77EX3mzOyhepEwOBFgtOaXFVe3cU7XqwWGYjzQ87oCRl0HrpuuAqTAIWJXVaBdOEvpNPQwBAb1FrC5u4tALjoGGMQIlNsDwWclBiliwI7moRn5EDCIDsj3XTFgE4d7HsJ1/Zae/2QeSubrUDna0kbLFZ9IYfW6TQCgJ6DnXX6bnsP6OGQwwPlLZUDXAK9aDgqbVsCrKO5DfBwAVQaNkYF0kMOrJy3o3bOMPfCe/Ioevi19qCwAvEHBoCM6RKSabfWeuhDWhtSzRSDc317Ve4kDwDNSViOi2FWhXgcc1vw2vb8WdDQfa8KaQluGhgspuzIoCSjac8I62YfU6OFNXcA0kB/GtuvvxZJVmdsA/Cvthhoqu7pg5EAkJ7ILqAQgTePNANkg5EbcrhoWlbyKje0yY5bdpcZLIqt7qNtW0tT0spYXZ/O6zo2hGjReBfBQaVUmPQPAbVzvudNlHqTxXbo2kUkg/lzSfEYPmp56i9A7Ma/vvQSP5wEztxsAVqkFrMoQVS5A+X6/oo5UFQrvlnZBaxsfGjoXVMYsRRVbOLGSl7kw8JH1xlTBF+vQDxBa6swDJYW/U0NNq0VVrKkIYOiAl27YhaKKQ9T4i/mA8YO0W7KqmYQo9O7rAEayDjxDU2BY9KGLQRaqimseOEiHqHzKDiAzAeX7QUaNMA8clS7omnRUnYVgdv05CjAcrWNkIB3kcFtDc7y9DitsgF4WGslCJVrYoUobVfC5cVUUm8Gw6KvDZxWoqFvxdXPHQnovQD1kw7IecA9gOqvQODdqYGwARioBOeVmXEGusSSEjeEagD83H/hvapBmvB3WYDwDFVzQZypdzctcAojOGgCqjGYh/dUAfAowDKeS0BMJQN9z3Ztlbj4vUzYLYOIotAFpzeoaDNtQAbNFtWwcWjUsA7dMLg6RRMihOJA+pKZygwq0PQAySjMzf1axWA1Iy6Z6+sz1EETXgOvKX9XzPzdUZygFBIH9op65chdSSdDnbyGt/eN+2NCUWLKqxnyipe+z0NF3Eq+BFx/Re/lFVtfKnVc51oampwM4D86MrkEyCY0wgYbAXQGHIaMCZcEgjwdVxXYTpLCp2eFmiOSmwRnq6r0konn9vboahDuBRT5UgH6ULeCqglY3nquORbqj73KioOsSW9HfiwGRpQeA7FxacS7DpfX7ZdCANYYxMpAOcpR93yL+PQc929WFT/c0dB7frMojnFYvawi59kRDf68Ppfoe9J9KdLTkexb4eZaW9QD1AhX05t8gUxMxvb/JrHrRHWAidklmxgA4G9Vn6zq6fnEIyzYgktN31AjLQHVfBYCvdIpSCRWu1F+s2VbCwHQaytQ7+hy7e4BQhBSKP9BKtDEgEe00NWrTBg6VHqR9dgdQ9VhWj3QMQtuJnt7Lzvh2mUv71E0esBmQgvYhpbg8pu98I6QPd0dhnc0s/xs1oIMilMen9B0HnhoR84v6jje1wQv3NOK2IwWsz7t1/VtwvsqBKukIGJ5OQq/hgbwLxoCgsqfKvA4RrlwUutYPIf0NxRIFT8vZW5Da7/5cjZw+VLs6aZWpEWgyXFdfzbYU9F4at+u5yWQ12tE9Uu/51iVV+i5UtiWGKjuAncU6JaU1CTvQxHcZmlDX9F5ikO5PQkrRK6o82TWnaxpA1XOtp3CEzO16BtPQyLi5z3bpQBspGiMD6SDHJjvS3L1LOomh+FDAG1C9cxp6/9RVKJX7EI0BNLdfh87kQ83x+9DkdQnuJQ4NBAthPX1FKPN1o+pRLUFYO7Scl7kUsA6n46DM61WdA0bWDK0LsJ0vg9EUAcxLBqri4p28zOWAyDI1Ae8X2M6HGTVOJ7qPlLl6Uz+3q6uRjkpVrzEFfCRjCTXkHTAYBmVdv0Je92kL1spqKtCSK/p+hxNqZCeXoCWOD+XPgFVodXTu5pbeXyYCoVgzW6ASGqia9JZUg3YnqjI3DpgoDyr35gJlMk+kFMfRKKuRk/b0WTI9ABRXdc8MC9RcWgVPa0kNEC+pBs0wrO94AIz29Qld0+yUqq5kQx290Ao4PqB8o0DPsFLX99EGxyINuLplMA6KFd0vIUh19b8BFYTHqFHsRyGVCUWxPcBhWgkKRuq/lrkACD6LgJuqAk40HIc+gmBQN8IqJ4bABRWFKuAuVPzeFVJHObQPeWQXCnVojAykgxx+fM782D0vM6rn2wbQXLYeQKl5DQypjAq0ADjr22XAJQ1VMAdQFl2v6O85VeijM01VTroZFwAcGs1AE9AyAFpL+rx+Sg/LoKUCww1p37VuhGgSAFvQgC7YUK3RgFByJ1DLogdlvpGeCpZuSb+7EcCrXkqVfsJulLn+1BaZm2rq2td7anS267r2hbx6wvWu3ktkqEZOoQXcVwF41tC6wEtoBdywrRVJqxAxi0ErmWhMo09tCPeXoJS4BtwtZmYxwhHVCfenBkN0VX+z5qtM6EIkspTQvbUCqbOKq5HXbETP8KJXlbk2KPiILqEFc/q5PGB8/LuU8qELDPSNpEZUIsDqX74dGiETzhHILV0gdozOqex1HJUxSU/feftOvT8XSCtrbTXmQ01NQxXGNLIW2qEG5mAWoixDNTbcpt5fsqN7qBUC7NNQ1yU7XZW5yEDvL2iA3E5BahkKMjzAUnbjuqbpMYV4FAETZsPquv92wMijMTKQDnL0NkzYcC9uhzwc+lIXQDkQ0g0Di2zb0UPQWFSyskzvFpnLJgF8vQhSrgUbD9o6eDUA9kI7IAupYdYAFtkmUAR0llXh9RbV06dGrUFTnyMEYElqx5EAwsa6o+tXrsHvhdSwKMLv+VDmm7tLj+BSHng8ukCEEobYPrRl6AOmwQGSztpAFdstd2jE8YiUGsU9kElN4BnqQBl89Ai9v04ZaAMqKvxDQBEQQCOnO1bV0JigQpZAjef0vcAVliiABO0apld1rVtpPdd+G3iLenqTjQg0ah1CU1a4vYWu7rddXYjQgqdXg3YtMyDvtrW0bU8mokailYDSo6UG9HAZIleOrlWlCTxZ0NrChhpljeU1mlXbNy9jZmXANMZKIMdqauQUAScaS4BBWNOIjwdl7x5UrMbGoTweKjOzZd0dmyG9PEzo3tj5n9BaJQOQkQGkBSO6rzrhqszZrJ5rv6KyYzCvRmymCJUD2fXn3xm1GvndjFylY+5eXCDjGX2xIajCWBhCX6klqHxK64usOVCSCqWrvbvUq2xCe4TVVehrVtHfmzhCBUEAkRI/pAJoA0RjmtDWwcvpPQ+gW3asBeDmCHRxB+xDHiI+lTZ4fADInmzrkan4UBoOGI4w4CuGMU1ltMf0/iIAwh0Cbf8CKNk4YEzqCVXaBPq2KnjlwNZ7bEffURL2ZCQGyn03tFYAEGUnAp46SLAGhPFnwajwIUXWSykAmgx5MzO3SZQZul7A/2r9JSCqg4hPHSoGu2XA7kT0vQfAQ9Nq6zXCdcUMriZA8YTyMucBSWotpF5TIqHXHSzod+eBJiXs6Oe8gRoWjqtGcB5AjUTb0KuCcwWl/wPgFGsBKD28rN9tJiBCCDxtHeC0SyT1zKXGVbYVAT4QqkJqb6gOyCo0PK6AU2JhaEIdAs69VZDvdV3AGahmTLZAPnWAIDmk760DvH6+t172ei1wOGGMDKSDHEFxaEH8no2/HThFlsFgKEDV1G1Qnjio6YuM+/raVmog7PtQZdPRjeeDd9cCPo3KMnAygXfSAUv/zj41JFRQYB8w0BlIF6+aPpvX0fsLL6lSrQG53VRZjZccAXYBMB4eQquRZf3cEBiV4456rt3bVcH0tx4uc4kyAHgX7pC5egJ6ou1WxREMNSo3ARiYmG5TW0xrNGvjFuAPagLAuw8AT6CeyIEy6Q1VIbhpfbamr9/N+7rZ5tK6pu69cKZ4UegNBZ3TC0l971HomRUFSyoZUqVVK6hS6AHWpjsArF2qKnMGvGWBC+XngN1pLOr7LGY0bbTsqYwZAg1uakGvsTShe9DpAXygoQZDBbagP6n35zX0zBUT6jiWJvRc9yFaVImqIVAe6hrEVvQaNdPvZmIaWVu5S9PQMxDlmwV6jHhYn3d5oAZmgfbpGOzJquqkMHAouV19IbdowaR1wNGYnlc9ulrQPdRM6Z4MQuudtV4HnEEYIwPpIMeuVdec2D0vPQUtaRJgDFWamrqIQ1WSM9RXVA6DIeCqR9CpQSuEhh6+6Sw0/LxDlUd5AwDnFuAQQIXJFLQu6fU17TZoq7DeDpwYaQDTAqmquVAB0wTPetED0sUepAUcVTq5SSDuBPrqNHQeruf1En5YldPCNgDmpwGrkNV90ILKO1LkLjTQDkGj1tmCKpiOq+uyWNaqk3pUAcZT0FA4AVHD9phayukVvZfkQD/Xc1Uh1PP6wAnAxbWpq62ZpXy1FAszEFG5XdMKsSndH/MDKOXuQosO+Jwfhn0OrWNCsM8D8NbTRb2/DmALI7CPwjtUZgW+RlmaG/ReYsA9lunpmoYdiLZDw95eXJ2X5RBx+0AblTi0H/L0fQygiWpttxoRiYEatksx4AUbg0gitFZKmWLyhuTYJnS/TAKgPeHod2+HiHlhCEUQQOYZqUNqeXW7zGUTKrNmS4on3TF1tMwt15SaJNaF87pPaq8PPQlpjAykgxxZp2yOe4+A70LJvEFjPB/C1QuDqsz1wBreNK+pC3dMP1etKojXhc3dakGjW8iNO8CkTdVfk309kENIxZVrwNzaVM9hCODVRcC3+Lv0c+MlNQ5qcxBiT+n9AR+fxSASEdmtaZlODpqWpqB6ZpfOuS6kFMmIoMjGEnAjQelvLQR5n3n9bm+TKqw7gCtoA/SoyhaOkjnq99aGvnWDu6ArOYD1K77+XgXC7mNAoTFYBuwN0Bq4HWr4w8Z8c5dimMJZ6OnVBwZlAH07gPFxk3qNPhRLJMLk6etae9O697twvrolAOym9J6DhEYxLawRkBTso1BQlbkmELFm+/psA9CLPvSKmw2p41gDJe0sqYG/DE2eq9uhZHxCb6bS0fXLA0YqmFNjowegYgdIHGsAjEtPqROxAlHWBPV2jOk6t8HRC/LArl0BDOJW/VwNml9XocI5C2c9HVecUwIieonC+nXpdkZl/r+T0WwF5uxVfhhdUoXXhQaR7bSmiDIdwGf0VZkvJaHCB7zANnWUhjB5BHiBQsCrMtEET25Ojb8aNLoceqB4fPUSetRMF4ywwkDB5nOwBnN9FTYhT787AdisSEUFSxlwP/0pMHzaephd8tTBwx1C9/cYVEcGEQBkh3RfOUU1WKPQ1wj4DM2ZhG7o0Ku2BgZhHIg7qQy+N6lrNZlSb7Ff198b9PS6xTyA64E6YaK5ReaaPRWJE2EOx4c8VR5BRPd0DdKKkXJe5uIEagfG/W5alWC/qUphLqrr1RuqEgzD+QqD4g5ldQ07QGS7OlTXojhQBe8l9btuUpXlTAvA+WAwdACo3oG2J8t1PcNZiDS3PD10TTBsAzDci9C6yE2pHlguAf7TVGbFq/ougdfRNkxq1DsZAioBwNrNQzHHGDDVVwBLWalBk3bgHBh66tynIfo5hDZFDuCG+jHQewUVUME+xMLeqNXI72b02oV1CtzJQUVDBUL+4CVUTAWuA0ZOEvhSKitQGt7UDVVV58nGW3mZuzWigiAbQCks9IrrwdwAyll7bd3IXahKyoGXWoWqjnAIgHfQ+ydSUWUyP9TnndkMFAHQYLcD2B0PSCHHwFNqQsl2t6gElTbQcHrW1XvJT6kQXg3peysGuteim9VjTs1DyiNQ/Mc08KV4pFDBc80aEJ/WVdC7wD4/ADJUd1kV1iwAbhtjei9tKNOvQKTDzCyUB+HsqIGUHeheyEAboAGA830obhjWoAQ/s13mCinljRosaNQ2gGqtAFquhFoQueroWo9DqrbjQToIlGATeIa6vu431wHVFdXzYH01JjclVXY0obVSYkzXYHpSz1eqq7JjrqfGmtdXgysLXEEZFwgqD1GDptXXtVpt6lr50F8qgNR5DooqmlAcsiEJhg9wvE1W9Dn6G4DQFHi9ktCIe5Cpyly9CoVDrr63TaX1QKdee5Ri+52MikUsvNcyVpZ1UwwhOkEtpcNR3aDxGpWp6sudhN5EHaC2z4FnOFeAXDuEb4c19T4HUFaaBnbjSEVpCLqmwON4Uo2m5rLm6Xc7ei+laT1U0LzcJjcClmqoaxWp6e/Va6oAUzn9rg+ppIEPndThfUQ6IKiAciAK1YcWVqVfcNX7bB2qyn0wVKM9TW3IoEKvAcSiLQh7OVAdOQAKgwK0b3FA8zo13ac7YVnyJTWunJq+j9AEtFHJ6++ZmZUBi2VN4MlqqyJbmtZwnQehORdSoUCTZTWoqOsC19I8tO2YpPRNmwwf9a4aDSgOWdZ0lQ+4ycDR8592gNUb0qONiKZHm25e5gpQNNOE9hR96n47DhxPUd0fTk73UQrYpodAtBsf6volk2oMhYGWIzYLvedc6G/X0N+LeUDOWPuJzHkpABrk9PeOg35v/rgaxVWodp0Erhg3prJoBzTsngQMVw56Cy5E1qd4+2CA0RgZSAc55pMtC8Xu2Rz5OPTHiamwcT31ml2oLGhDR2kvrIcvAzw0TlojLwOo3InuAOULhHJd4B4ZLKiwaUHVHgV3rKSbtFuHSpSMfjkLgj4K/c9WIrpWYajfD4eg47qjQmkaGva2gI+kRjmxmAr/VF+VfteAHBBAlR2I0JBR5wCAn6Jtibg+72QBwNygjFMQsZ4CxVsm/Ay0wTKIFvXCqogGs9CLCYDWPhiibegz1Ynq+9hh0LzPzJJ1VXilhJ6R1gyQ48W36HdDUH0DqYuVKcARQX+sCESa8sDj0/HAYdig773sAC4po8pyNxjpsThgbboKFch2oAIRmMwbPkRAunrW/aiquIdAGf38obovh1D1cQdEmjcBXcQEVGGlIH2YdvVe0uMqO5JpjQYu9e6SuU5XcadJYOEPTO95GIVIeBzaI7X0vRlQz1haI9eTpnMTgZ6PYVuj6NOrYK5sBPoIIFz29+Ei7AGxJY2RgXSQY6brWHivsvsIgGnz0L8nCRGkoKbedWsa2nb4akit+rpReh1V5uOrKqiqkbzM1SAEm8qBJoOoSBfwQVFInQ2GqmCGU8C+2oTqmaZGkBYrWjLvTEK7gI7+3m4AXyYhrTUHTLBuAOW7IMBdANK2oNotmtAQ9q/6uje2RFSZ7IY0QwS8rCjgNWaBKjkJ7MSOo5GT5qpGTltgvPjQdNftQ6UWkJzGNulzJAHknpsEOgCDdwTvo9eHCAEA883MAkiPDmagGrIOzVFrgFEDIsEBlJp3mqrIDCKgztG3yVxsh65hGDCD6bqqhk5Y92p8oPcXAb6fEmCBvBKQZe4EsC88LvUcczYAhnNZZdZySt9Hqa2GnpfVc7ippzIhNw2UCIfoPsjMq9JvQUSlCSndhR3f02tk8jIXB36jcEHfWwKyC/WG7o08wDn6UI3XgDoGIlhNViGkX1SW9WhcjcRNs3re3Jie62WApWTT68k3O9SUEMbIQDrIke80LLJXKf4QSnCHvh7m0LgeAqCrsFxXd952AKYeCt6J11IDZHFKPzcOVUQ9SMtkgdCruaqbuwc5+QC4eOLQVLQf6MGdiQGXEaQZNxypz9v39SCsFqAbuqPrEqloyiMFfeY6IeBGGug1Sg5EIqLAl9RUITyb0+t2ARqzKaVpkApErigVVy7rOmc7+ntHACDbJiDV2ga2cwC0BmNggKTU4Cos6fqtZlQhdMcAMA548UFfFWXgqzKJhIG7w8xqQPY6vAt6ExKEaRxC/H01WgfATBhJqHM1tQSVmWAMTULrnaUGRGMGqoxCgDdyIOqQDfQcroCMyUEPs3gB2s5M6LM1h2pgDkFLO4EaJcDuYHFg0i5MamPvaXBAfEgvOcD7tgPwd0MA0keiqvQ3RHSdd0JVRWdWqwXJ8d4JAPkcUMu3ocDAiekezw/yMtdv6hmuh9XggjZ9Fk/r/fUhEm5llQn5ouqG5X0a4gYeNI+DMTKQDnLE4nPm7NVZ3hmHlARY68R07kOKYzWruycd09cWXVIl2N+gocZSRKV1EFJOplRbLfgekFGWiupl9ft6f02IXDlR4ACBkHMqBj2RgOyxAcBSC/SA5yJ6z+2KCpaxAgB74V0mBvpsqaoKLxf64PWz6uFWQThMQLVQa6AKYagQKUsB7cIAKnli0GF+J7ScmAQm3Sxwad0O6dx0Tz+36Vcq5BIQNqgcrnOJBJCcNvSdh/v6fn3APoXB+Ou1gD7ZzOJArBdzqjLnAAdTaPVInXNVmcf7+swtIAhchmqeVgN6HULKfisQ+q309J0UXDCaIOrY7eo953p6LxnATQZjEH2GyjvXqP0QWMHQ6zDcUjnW8JSt0Kn+XOZ2E3EZ4LXaCTU2esA2nXAVCzToK9C6F4J0GvTLq+3Tc8zMzMbVWRvvqvwcAMdbFBz5PqScG1BtvQkKI4pRNUTdsL7fMapIjKtMXR6osxBZ1nUeT6yXWe3hCIP0uxluxmyvPHcBwum2BTp8r6h17fUVyFxc0nTGwANelaEqsi2AtWlAJMKH8tMGhLAjbd2MgzwAccF7yk4Bx0tcvcXsnN5zEnATUQC+72ir59oIVPj7K3roJ+DARA4FUGAfquKAv8amoct5HMCry0Czf6Q+7/AmSAHm9Lq7FoBMEYyXIuDiJmYU5xDPq3EwgE7Yd0EBUd3VvdsHo4RArp0wYMJuo6pCfUcZaNmxG1o1DKnXHvRNG68TSMosGEIoH5RWAvBPnv1M5jpxVebDnEYxa3lVvk5d+3flS5ByhsbFQZkigvqeFsCYn6zr+c9QZHioSmv3Tn0nU9vUuIpOqOIug0E444HsmITwxB2g9BNQ2QppdwdIQwMIgHpL+myJARDPAp7MAdyPA7ifUKAA6uHPVTekDlVdEwogJ5bU+0t76nH50MsuAUZiE2gDhtAAOA24zkJODbiN0H/TdXRPdmJKHrnrrvXvvNsetRr5nYxsy2xvXr+yqaCK/IpKUlVZxjMU+4UmhXAf42lVZEuLOldoQhf3KQg7tNQbi6+Ah+HrwXUm9A6HUUitzKtiTEL5s9/Tw9wAfphJ4FqabAOIHEjrEiVohAiHyAtDc8mYGhYBADdjSyqosjF952OLVZmbz6oUTiQBO9JTo6Tra2QilITefR0FeM4TzURV7y8aVS91MNDIyRKAVysAVghKqmDi0HW+Ba0GEkNVgEmoKqyDAK9Aj79OgiNIOR/IBQHrVO8BdQDwLbnAjZbu6L5sdwFcXtTztSvQ6MRWkB7bgbXcAbvCqSsNRMvU0+8lt8hcIVB54kJ6rtXQM7cEbUrS0LfST+pe6IGKS2/Q994Kq6FXrKlDuAytUHxH94eTUCOn3dczkoRekX2IyrXSygsWB8MM1IWFoFckke/GOmpsNHuqLyLA7t4iWBz0RFwa6lrl0wD6DusPlgHH2gccawRkfnOfPo5wazhGBtJBjnazYo57z2p3BqosIZNkUeDEsDpgfBxVMukS5LIBkxMHjo3Vxil6L2VIQ0EYNZzTzd0AcrvGL6BSJq2bdgaM+ADSzEAjY40MkUeCQQPBnWJGBX0cqtg6HT2QHQMvH7iR/L6uy/g0NHlsAmt2HqqPoMFmEtrQ+Jv19yJ19TTzbRVy3QnAcEGUpDClz5YCY7KSUvBqCQCecWCa9yAQW4X01xj0YiNOq1pThX8ATNhDwC+5EHUxM/OAOHXV0XUNQTVkChjy62Vdw9YKvOMEGDS7dW+5gL8LHw/4kSWNuC0AmD49UEN7HqJjXXAiikAKWyir8edlqzIXaqnhE4nq3E5HDYsp+NywqgbDDigEaUA/tQWozDwcCH492EeDAAgMQWYFPZ1r+0B/EGgl9DAKxhBAEEk+dSKaxktAU+sENMysdqA1DaTi49Arzgv0DP9qVWVRaEH3bgoqiJ2EXqPmrn+OPtwbjZGBdJCj0t5kkb1aiveh51gCSoejHTV8KiHd3OG7AMQLvaZ2QqiRSBfzGb2/Wk2NtRA0H3Qz6lH5UC7eSUHFCngxXej3tAipwh5EOzIt9Sq9iArDXFTXaujo4QNeQhsOQdlBWquwqteNAkO27VTA8zYAfQdAoHcoKMomNFZNNvUdZYfg9cagy3lTMUgboG1HDgDtk2P63jZs0OsW5lSgdfJqFaehQzFVLtYyun6bd6tQjxTVmIm7YBRXdCOsAgmrmdkQgIRdYEuvQeqnDO9946rut+U+eFeQci5M61mPQfucyJ363lthdV6SdaiKg+hfG/B3jqMYmkEezhxw0fQD1eZQqW+TcLyoI8xCQ+8521UDrgOpWsuoQp4Cfrj2OLRCieo6u0XgvgpBdLylax90NPrcyP1I5mYg5dmO6x6q7NR7yUA0uwEVdbuBWT5bVcMx0QBiYaDWGHbBgYN7WQDiyVAR6DuqEAmrrf+u14FqOhgjA+kghxvyLRK65zD4/SPkM8MFPXyreRVKwzywucYBjwJcIVbWF56bAPp3I44YVaou6ITOshK7tWf0YEQzqlRLEMmpzWsFXCSpazUzVM91ZRLC5HoezWqUa1Yl1gdQoA9Cs7iqh3mQ03Vug9JxoZopBoDWGUgRWUR/LwVOkAc4MQ+AuRNZjXROhSDuDFVnkQmNBsQ9vb9sRdclMgNtKJaAJwgq4Kob1KBJNiEFXdTvplYhmuJAiw3gbkkcoga6mVlj7tcy1+qqcZABY7leBzxaQRVjvq9OxI4eRPBmofqrAeDhnkZZNmdgDSHCaMD0HY9BlVNXjcxKT5W0m4AiDQjWJUzXZREYntvQ3ieU0D0NNpOloVF4GDCNdWBuBz/KlpY0beQGgOuEaEwaGMs744onW7lVLzyXUGxbblWNpnBS5V2/BVWFHWDIdvUM153tMmcljSDX+4B3NZVPCWg4GwWc2CpEA5tJDT4s7tM/bki8TTBGBtJBDm96YP5eh7riqGU+DvZMBVplOL56Ex7ktyM5xZnE8tCsNq0bLwJphQh4mg4o+E59u8zlgUytnVDPsJaBhr0BlCYDdicC7LC9W/S6hSn1oltAf2BpxWY0O3pwC1AK30wA5iWiwjBT0PD31Hbo2bZBn9dp6L10oPImCxZSoqjGi5tVIVyYgJ6BITAEPL2XLXUgvASl0ytCn7llqJ4ZU0Ga8HSfxms614WUljm6D5oQ1bRAPU23psq929aqRzMzf1LfSWIBGMCjqlS9aUhreaDMHcDL9fSs7wDyw4cAz1CpomvdT0CEEcDDbljpJ2Kr6rwMQAlOupCqIWgXRNGiA0gHAddSV/G/FpQgBQitS7ywVvJ2Mifp50yjTxvKVZlLlFXudGrgNEHLn5CXl7nePOD0YpBdWNF9sCOv53qiAEz/Veh1ltX9l0vpO6+n9Sz1m1WZw0FHE8hBa0D3EgCQfgWocQr79EMdQE9IGiMD6SBHOBu18F5l98csqzD1oN9FKqMbNNMGACuUY8bGFDhXr+jngqpugmhWPdc+kAH2mpqPTo0Dz4irXqDXg1AOlNEnEW4O+KqMekCRh0N1GoR545CA7zT0MOeiuqY58D5bcWD1NX221SawmB8CRIeDLTLnTeo1YmDnhQCDNAZVipaC1BRURwIrBPEPWgsaGftw3RpUEMYBHVkcwpmJAB4KWJtDBlV2FSBXBRb4JlTeNIqAMdsIa2pmiaZ+35sEoDA0CzXg5xkfqFcfB3JGa+v9TM+q8pjZqPtj905Cp6rzUiTm8Y5qsg6UbSfzUO1W0HXdNQ/pzIiSW3bbEEWDELcDfESebiMrQMR3patycRDSaiinC9V4RGkVKI1DAE2BzQcDaRkqjSehN1k3L3OprEb5o2Hoazin9+LmQQAAHKHd1si/AbZoA8A5AsBwtoEqpgVcep0qgPUHGs2aggrnaHf9mnpQaU1jZCAd5JjshczdywTuASGiN9AIQwT6FS2kVRtlIT3XWwaBm9ODlvAp1QBezBjwxjiqjLpAKDeASFMMMAgxYOoD3jnrAvg6C1G0MBCTtYGJmBDeWWCgTUF/Jh/YXIsNVWwuGC+5ArRliKlRkpiARqYralD3plVhER+RDwRocUf338wEMCVnVLCEAnXLY0BuGQV6hlJK17QC0ZQlUDDtju6XVF/n/DQYnUDmVwdWzSQwZDshqKgBDJeZWRkY2Ydt/X7QBNwVYMXa09C0sw1EmBPQoiekSmbbPKTno3o2Ietud0ClXKT2C5nrqE1ndXjeaZA7pUk9X922OhbpMb1ICcDNdTAmG0A8GQqrkRNOq/INgbPWcvS7SWi9U4fU9EpW76XYA2qCrMoJF4oloB2lpRKAS+zp/bWg8Xga2jylp/RzDUiD+h0gJd6phvI48BbVWrpWIUgjm+kDF6GbQAveW3UfIT2ArASNkYF0kOPQ6bTF9sIJ3bkTiKtW9VAtQ0fpqZYK8XoYlBGUxUWAgbrtqqIttXRjDBb19wYOcSjphm/N6OaenNNqjR7gklwgEPHB205H9fB1GuCdRNWLPiKpCsZLVmWu66lnE42pEE5l1Htyi3qMZh1d57qnSiyxCsD3McAlLUPTyDiUuIZVuA4hnWtAR5FtQ9Nd6NTqAVDVMU3dDFv6Plxg0u2Sx5dRpVhc0OumE6CcgP03PaP7rw29ZnOLes8O8PWYmQWuOi91OHP1DDQkJgJD8BgSSU0HJ0ApeND2xIFKpSyQd1ZW1eAqQAuMENiJzlAVaBgiFuEwPC8waYeBXmRpoPLTAzqFFLSPGAMuqG5Um7IuAMZvPHmCzgE+sHGIYl7iOY0gxW7RNc3HoGw3A1xVCbgGRPSGQKC5OgnrDI2Md/RVnuTLmlKkPp1ORx0pd6hnaRWsq0RXDbMKZA1CNaCFgEIVy+qzHbIPRrIf7po2b9ExMpAOcvy6ebs5e1VyzEHftf5MXuY2ARajCeDh0pRu0AS4Dp1VFQQ5iAhEJnSTdQdVmXNDKmzqjgrS5pJ+N5kHGncgnuykVfBtiOqhz0U1hdWDpqBZTz3wjK9GSaQF2x5K613AL+XA4+urPWgOVZgBliICfbBCu2H9qiqABsD70omqMISCKQuNa8TRIK3iQLTIgHtouaX7tADL7C4BtgVaSYxBs9pmCHAdEGlys6DE2tDaZ1lvsAvA3J1ZeMFm5nUAxwVYoAAijNkhMHZHVIFmoT1NH0I+3hLsS0/f8SpxrYHBlUhpJLyf0N/rgMPlQ4PdRlxT9j1X5YkPRk7UNOUcAtqQRBEKBYA/zAOHodTVfR6HtDvY9+YAAC+8rM97VF7fZQVIIX2gWPGgbUcuBHAJAPREILq70tL9l3cVYxbuAhUD8a5ETpCpXriq14CizKoLaXfArMWAvyrIQ2oZQvqd5vqzHhB2EcbIQDrIETR2WLAXaWESctnhnnqBxbEtMtcCz6sDHBEeYTsmVIFWgCk4uaobqgcbNABcUqMFncr7KtRX5tX6n5rRDZmCiFlvqApmwVRBJQrq/odqKpSGoEyKJfW2M9ClZKIF3ejzW/ReykBXAPiqOBg0DnRcTwFJ37aIeuDQus8yDZXgUeDh8aCtzSCqgjkCPaAqIIRdiDgalODWAYhchzJ4qmYa+Br1ikM4vQ0VKkOoxnFrutfqSd1rkQrkkcwsGKjxHW5Dl/RVULRFSCVFNApXG2pEYDCAVkMx/b3llu6FKpyHDWB4Ug+ulkFxCJT+DyD9FW7o+a82VN6NQdS2BdQJ3oLKxWFeC2TCXUjZ5/WAxfvQsgawT9Apx+LbAb/UVtLVDvT0i2eANgCi3u6SXrgWVwOzAxHCXlTfeTeukb9BU9cgDjQOBvKkM64RrkRP9+4wBoZtAdjTHY2YjxeBuy0CHFRQ2bpvr2SfLF0YIwPpIIdnjgV7KYxcTgVLtq8bvhpVMGICmgC2gCStD0R4VldsQTiq/YXm0nn9HLAphiHdUmzpgSwDDmYyrWtQ7ughSEPKKQ2NeNu+RpCsqwey422ROQ94kFrQPDg3p2s/cFT5Li5vl7kgp9co1uG70CuuCPibVfB600MVfMsgHDIhNSJqdRWGHoDXJ2vQG+tQ8OSg1UgpDmGqMSA0zOi+KtV1XSLAuNuCaqse9NobAAA1BOXou0CZ1KESyjOYNLNYRlPOlF5y2ypmF8ApaQFgNw7UBn4FiElbarxUDRjUVzTithuwWAt9vb9xIPnMAzdVG6LFziSkeSFz6bRBSVOhIlTKgv9mqTxQDkD0JAKtN9LA6t0GyobuUGVbHGgq2oBfylRUTvShwqwLHkMLshUGUe9QBjBNkTv094DiohRV5yAN76Ph6h4fNzVyEiWVWS1fnRIoILZm7yiZ87oKUTAo3HD30QN9aoYKY2QgHeTo3JmwyF4EN9NQWuvHqQO0/hYw21sPohgJwButpv9T5gYDyG+nVQlOlYDNGfh0FqfVCJvsQxwDOoanfRX0LpAVDjuQggFPxFMMubk1NZoGK9tl7i5wHvKOkjh2oNv7Yrwqc8cGQKrp6zuagN8LQeTFvUUFSy6rD1xcAa4V0zC5BxLNb2oqqTqua5+A3mk56HHQrgIxHqSbmpBGikCqCjJGloWmxZ2IGlcTESBrHWhUszLQdZneqPsg32Nvs9MA6xYibvVDdQ0TwAv2c6i+Mw2KWNuB6M60ntd+H8hjgRxz2FNHKgb956JAghtkIWXXy8tUGoywUoX2h+7zFBRuNHq693c21JGK1oDfLKRr1YOonOvonolmgdEewMMdSLs5Hd1vZYjGRIEIuBHXfZV3VfZm4PcGHXWawokTZK4PkbWwafYjDdeNQ9ujaBwMUSivzxT0ui1g/84vq1HXB6N9WFS9F90n6t3bN6R0L2NkIB3kiGYi6wykZcqdAmlnb7e+oFVoDdDrQclsQr2x/OBsmRtMKC4k7OlmLO/SeylOAsC7DE0jU3qYe139bgGoDqjHqwdU+V4I2iM09KB1+gAABk6M+CqkdPJK+mcAPN5syjOUcsFjhtRPZmdV5trHQDXJLPQDA3fb7+o1ShB9yoLX6wGlxI5VqJ5raxSiPw3uXU8VUQYaemWh4eQUAGQBP20rEAlzktAs2VHjdAcoHb+v9xzpqzJevBdnM92AqG1e19r1oA0L8IdthJ64O6rawDoKbPNJaGBdNgCwruihS2b1/mpJ4DIzvcFVwKOMh6oy117Ud3c7pNjioHxvArB/OAxEm4APXAQAXoEoHxK649IVyLvH9Br1PJCpeoA3BDhfFPByqRoAmQtQkejo/g1DZHLK1/R80wdC1KRaJU5T9/hwUeWsA76431eHppmDKm/gfRuvQeoccFiJcT1HfajkdfepiiOsG42RgXSQY7mTsvBeeVo3poRjMaA+d8dAuDYpVKvXDCe2y1wkDm02PLX+5wJVgssz6nmlhtBCBJpGumH9PRe4gmJdFTaeBzn+phoHgach2HII+lsBliUKjOXNjgrIfk9B7kvgbW8M6SEtA65jMNB3WYTqNHdFFUwuq73ioP2WdaogrCEqspoCtl4o/U2m9f76x6hwLdZVqC+WoCErlD1lARxZ7UNqqf1QmcuM6/3FIE3mwPsAvkAbQnse64NRB5E/M7MOlJB3usBXU4WCgkBfaKMJVUkVfZYgomI7EgIQelTPTXug793tquPThIrQYAVA2h29xl2QgvGAwLQFhrFBv700VKzthkoq11fZEelCiw5PDYtEFBo/g7HrQ7XgODV8DOveisD5SkZ0v+Sg8TCti5MGeTcAqgMwmkLLqhusrMaLC21FIlndG204h9GqfrcVgQo9AOHfCfK4CBQh8b7Kzz6B5vdxMIcENIcxMpAOcqQ2z1hkL6LIANIFMQf4bwa6ocLQVTsY1wOZbKtBU4YSzduiShpWgvTXONxLHXZGBErhx2sqgIaB3vMy9JkLDDxh4DJqAzNyZAD9rVz9vTqUK+chwT0FkbBmVAVVxdXwfCamhz4DQt0Aq+Al9b2tAMYnA+SW6THA2kB36xXgoOnm9QVnIA0ymFPh1YVS4qluVeYCCAN50PfP66uz4GxSrEykDWlauOc+VIilQBENwtCaAiJrDTCozcxWwMiMQOPiFEQTFyFlkuyq0A6g/Ug0pkbJeEyVfgzOV7QGUeChrk0CjOoKGPib/EfL3PamRg7Cad2rE2FonwF6u5vWezksrWtQ9QH/2QOjCVJ2rYxGVPqAX4yGgcQRoqIuRAgDSP0OwLFdBkMgDS2TklG9l0ZLzxIQr1sKDEe3oGvVTKk8BhirBf5xMud3qjIXrmrOOAZr78KZ84j9uqOyd1iBvpXF9eeoT92wYYwMpIMd/artvYw9qAiJOJqmGEa0JL0KncnHCGjpKNYm3QGW4aYywVbBg89DaUa0qodlCMJhR1s/t9QDTiFXhWY2rGu1E9YgHFJl2U6QgtHoXaGhsd8INM6sgdIvLkJqBDBIECCwja4qrKyna9AbgIebAt4XAIwPyuB9JoEQsa8GZhr4jbJQ9RTPAH7G0/0y76goGaxCWgCYqpMBYMd2Qxd74JvKZwGADqmM5k5oXdAHwxbSc5CNMDOzfks1zyT0s2tAO4k84McaU7qREjshkttXw2f1/8/enwdtt5blnfC55nvd8/CM77xnNghsEQe2xNJO2hg7WxOTVAGGSFpNjMSoke4PFUKoitmxK420Vod02hKNFt39VUqTfFbQmNjEEBTaARRhs6d3fObnnsc13Gt9fyC8w/Ejvtvt0OC7qvYf+3rv577XutZ1ndc5HOdxABVB0ZYhM8BDupCRWgBgvAHl/qKje+kCZOaSAkpJ+RUZa7dAADhS2zYF8PBFAOyuQ/2NY9PAMYWjsAMM4z0AIy9A8qPq6fNur3UPDyFDsw3k6adrDYpzRVBYVNPfKKpgUzfUfpaAN0xB8HwPGkbOhopzGsE+dHMNWNcjte+NQn833VbveVIAGBU6VuPR7Xu9WNwrsf2xXPPR9La6b9KDElGhwM8a1GfdUlO1zy/10CogTV6p6gFfwNvtgvr5YAysyks9UBY+kMIBHUATSCHJMSsdNerBvj5bDOrxW64eqo21pvE7oNm0BrVs4kHaIBE9IDD8bVDGvgFt6kldD7YNEKvtQMYesOu2AnBocw8kBM6rUcqGOn8zty1jzlU1rnNIYQdAfJoN1eCGppia2AVK4LoelLNc3+X+EGqPK30f5UyNsA/O/XQG4st1Eg0zawDB0XMHuo7ubwLpHQDd10An8EU7oMUIjNFLKF0s4ITarqhjlkFLdQ6SKXmuYwGU4kuQjpjVwGkCjM9ooPecFiP9vimUtYcQYEIH10MbSgcyCPWefeh2nYIAcBN0w7JcHcfpWdVndIEIuJ/o2vegPd42QY4Dzgs/Vqd44ep+KHWZWg7YtszRzN9RovYkgN6GINLvcys69/1S59mfq2Obl/ou63Vdk9M7MsNpea/E9sdyVd3cPO9mpOGBN7yErrN4ogZtM4RyECjKz9YaKZUQZc0SaP29DuWqWFfyYgPaO9vAEQM8PhGouF+c6/ctl9C2F6gTFq30+yYjiIrqoJO21lJXF6LZw5puhWs3QGQUslneCHiQdgHXAbV2fw7OJLREb0bQYQaGdH9D728NpccG8K+cv6oRmhdplurEAzLK6/p9OYDrAU5m17bUoens6mHsQpNABl/ogzRI0oe26xbsS2DHnl6Flhozq1Qg6wCSEMFY313e1UOrN4XywwFk5rbBoSnV8dls6CEdrPRvG0Ds6qZtGcsguzt0AbO1VGfo/FKDjU9CENGD77sCIN6qo++pvaG2wwFgf5Do/mrMIZPb03mpQ4A5GkDX6SVgeC+UD8sC0ASDDHIP8Dw9CJAKkvIBstcu3MoMMFLxQvd/uwtnXArQDRDCOwRCTt/AlgOOdR1BsqDUcyWF87G4Q+i2gDI8XfccpBd5bdVK88Obmx8CUCuOwMkBRfRK+zEdq2rZqA3M0gUJKm3roh0PdJGtgTSwAs6Le6pGZJKrQRuAMdz21dMfb2sksnR0s9Sr6uQ0AEtV7OuidzN1CD+eA07keVCZB8LGcqGdbaOqbqOmaVRUAGndAlLiNhzJ0GFNjVevAV1AgDc4LDS6Gx+AFIqrc3+S6d82D9RYn4APUYUOHW8CLdsHIJ3zjBr/DEqFHeAYO9vUF+dAYLBu6b0cAxq+bi+VMTOzKeDM1l3dS30gx+xd0f1ag2yCByrFEfDzxOWDMhZAqaEOBKvjQ1hbbcCKQdCtO9isBGb066Wuhc4McCARcNgAhhOanOzAH8lYC/i+CliDBpn/TcBSuse6LqstXTO7cBCUkOWfHUM5vdBZdbu/I2NjcIbWqZ4NAeCXnm/pOz97qt83nquzuwbeog44u0deW8aSlMiv9H30QNJlAuD/mqmnN8+hJn6nXJV7T6z2j+UK5pvm35KGrIawCaCCcJ8HCvA1fdkeqJqvoGxkUD+2QldUxwM8BLTvNkF1fQBg7v1QDfMKOhpWoB/XBCT4pVL/9vkTxa0ctmXIjqbqlLRzfR+1zitlrJjqpl+WoKsHANnuWCMqBzDay6bO86fWqu02DNUp2V2rYdl4QKOnYAWcJyfQHgsA40/s64FVgsDuAeCw3LmOnQKj7UGThJHV6YzG6tBAYtLOPQIM0p46SFsrqFvCvqxmemAt2vo5M7P6WDNkQ6CfiEz35lGgWQcv1vVRS4D1Gfamu62HTAdkGGbAjOyAPTmFubG2BiUzKEOVkPUeV3T/bzVAQiRWHq9yCCLKa92HbThUK1XIWCx0Ie1c1b0+eQi4jIAaImsRWSaU3SADOgVR4BSEvasj6KOPtWpQHKmn0oY91wIh8yNP137cBV09oPQYwTuPgcTVVvq3NeDBmQEth1/o3nKgjlfz1H7aHYK4wQIyGXDdc5Be5HUSz827xZOvLdQY1iP1aJc+eOFwADieGtLlVdhUpgt00dMo0INSzTUoOT1cVYO7Ez0sY80aiORC1JGChIAX6QMfQlT0KHURwUE0B1HQBQCxJsCMPG/qAVo6UJqC97uK9IDfJMHJtc7LFLqeWtABcwJlo5O+RtuLQHFYGaDI2zGQc52BjAgJj0LnjXu/OvxRTR2z+3NdQ13Ifp48ouvq/oo6JNvP6v2lVZ2rEsqWaQfoHlIooWaQnTWzAtb0cKzvuEpZIMjqdeqQsoQuVqemf1vA8z2z0gOvWwWeJsgWdUG7rvGgPltrX9dvG+VfZMi8ChxSsKZb8SUZO4AM0o2JBpiHTZ0rpwPl4FzX70ug/WsE2aJkoQHcGHQXDTouPeDZrRQaFPsbuh+WCWRPtoCMNtF3npt+HwVDfWj6aEN1ivQB+4BPa0B5/mqkc7rTByqLcCRjJfCUjia6jzp30J84q3sO0h/L1Zs45t+i4J0FwEoNTLxBDuUW0MwpYFM1QZB0EoMkSa6OwLoJkhpz/b4r4MF3Qt30c4U+GJHr1lItsa0Aj7LdUuNQtNQBMTBAZaxjDmxcDyjrB1ASa9Q1qhwDdieoqcWg6kExhc4lwLEsgdjR1tq+m4Nau9fR1pYEWNw8oCbwoZuxBLmAFP5201P8zMp5mYwFDd0fAxCXrJ3o+2gB0DcDcLgD+mAx8HVVP6UO0kGuz+ZBBG5mdjhVB7C2dUXG1hBs1EGMtxbq79wHB2gIf+sFkGUBTqYIwNcOHSgAkh98DBzobbVtO6DVNwQB1hSY1tO5Zln86JMytproHkmgycWL9DeiEQGe1VbegFKcA12KZaZrv1bReZnPgcUcPKQ0VuclW4Adm+nniABy0dRsVgrUBOEpUAm0wcYAD5LNQHAa5FHGQKPRHMHZ5UCp1VWbsNoHjUUg0B3ewS2VgRg7XfccpBd5DdO5ebfW11dK2+82tIvtCBwkb6kGrdoArR4AVc8gtVrpqJHzx9BmWYfulBkA4oD8bLulGCmDiDsd6kFb2VCDVjHoLCrUiAxc6CZpQabpEzq0PqeU9X8G9O36kA3IIbU/hNbVYVPndG0apXpQOmsBdqwPmhN5BpmJIZRQgOMpHcF62QZR0EAPxTo4IMNCU+dVF/TGQMw1mwDZG9AOz0Dfzpupc+9AFwvwo9pOXe+5DVw6O5+jzb9RI/pg6OYD2ZTWDjCtQ+dOFIHeW6zzFe/pWB0kSRaf1PkKu3pwjxcK2C8ifV4fRGif6gHmCogdq7libdbgQJdLPabgiLYWOEONI1irTQ0ipqD91FiosxYAeex4CoLC4Ej1HL2/mqP7+hiCoQ4wgg9Bj66Vqd1p5PocTgLCr0Az4YIm3ylRywNn1GoGvGUgiLto6u8GEKxNJmDb4HxMgGKhdfl2W7le3ROr/WO5NoqhBcUtG2QK2J1kpH8IwLQMwA4pRKkt+NtN6NbKQSdtPtBXfhbaOz8KEcaDAxXYBb8M1bJjFwwuZDkzVw+YKqClg9pIxuZgXFuvUeO1fR0oB2agDr7WcotBir0LBnIAEf0EeKRCyBAsT4FRHc5i0v2adQDA6+uaTHuwNqDLjtTaE1dT8VmoEd9+FRiyoeustgtZG0jBOfvqZNdb+rfNtUaaF+fgoEPpIdjQzVCHDKuZWQBM0Bm84x5F64f6ndlASzX5FvBama7BUa7v5MYlyFLVdJ1vAJlqATQf3gVwwgBnll0GWgM4asZdzRYvDTLrcx1zQXojek7XTB/2cBekPEIPspNbkJEGh7yIoaszUyzVoqLzN04B5g4Z5EUEJey54g3Duc7zNNb9nyS6rnxo4Dl2dU63oXu2gAaexVyfIwLBaW8A3akA5o6hPDyZtvVvQd9yNr3dPmUgS0PXPQfpRV7p8dwK/+YBOWlruOmVOnZu0paxBWgYBQ0F4i0zPdzCh9TYZPvqlKwvqvH6OCiD9oC1dB8Uspqg7Vb5lC5ubwMwDYDNSlb6tyE4nSV06KyB8K4Dxn81gfZY0FNrghOWxcAjtYR5gTT+VlUzZqd1jYWX2wAEBzHIZQ/KEft6KG4ArsPO6HPkdaA1mIMhTdQZ34DScjvSNXSj1OeYLfQwbjb1XlogMto/0jk96oA6fQmp/Zne3xcvNXqf1QBxb2YN088O4CCbRjqHZxvAlwYEiytglk9BFd6BzrE2aFINgFzUh2xi8RDo3p3oYd6ARpAa7MM5dHCWp4BfhIArqqlNCKBd/BgycM0xANChq9CDNvX1OWj9h2Aj6EMpqfpRGcsmQKALXFp1Krtd/7iMNYEKI4HqQu1Znb8IOkxXpT7b9qHur1lV198KAlvH1b9d7Ok55Tka0FhD7WLlBLrsOlfgc7oO7tzpBE+g656D9CKvSXzOvFuUgT0A8QYTfdmHNQDgALt2Z6ULeVnXenkKTTppQ7E7k5EuMlCkshW03nUKHRs3taR4sgsZLtBOi6/pId3dUUNVCXQO0lIXeDQDnbkckvFtkBDY1QPe7+vvxqDhc6at720FOlgucLc8CjiC/VzTRftAGQXZZbMalF+giyqH7EkxUkfFBQySAZ6kmEHp8Vj/tgO+hgeHzhK6LZ+HQzYCQPAcSrK1rZGMpadq1P/vAOQq4OA1MzsTQMmkosDZADqpzNQB2W7oHA7q0PAAzQMbAHheFHrwRDA3s6462hFgSlIImqa+zuEcyrLuJszhAsgAoUvMATqFBPjXbK6bZB9Knq0dkKfoQBAGPxHCXlr5kO0EzJAPDogL2olDKKcvgJKiBX3vC+jQI0rE+ZQUH3Rfu5DxKaC0F6xUP3J5HXBTUOkwsu/Av9Q4q3vdvQaBN+CmjiZ3aLGB5iJd9xykF3mNz43NDW++pOZaD+Si0BdWAXtRmEb6KXWiAGZ0DS2fYxBMdTNNOecXgZxxCNtqrg5IZ67ssE4B+JEayQDog5QDyJQ09Z43HMBSQUt/PNSIJcogJT7QuUru13uJIAr049+SsaAJvEqZOj4BcK1cAoHis5A2Hp9ekbHBHGQoOrquRqneX8vX580najT9mq6X+ZG+j/QEymnQpWhtdaS2s7aMNUp9b+5SsxrnoA0+BmAzcHRaG8RIa6BHZWbmeQpwq1X1fs5u6LvbAu1EDxzeFWhGQVOclXUoV5V6QIUgIJyD8ny1pgZqDE7deAZs6SFkwoBJvwa4PwewLFXortqG+4vO6LM9+7Q+WwZ6heWh2piwqfc8m0Dr/xq0DiPdh+NTINWFjHSZqmMbrjQIW8K7HMM8j+eQjV1qYDsD59kdAcanrkFAGQK/HpB+eqk6SMCPbAmpCVASINW5f34Ma+2O47G4O//onoP0Yi9n0TInu+kgAR+aub4OhqCcbA3QOtrQtu2p35ax+lodmrCtm2AbIpECtLUCwC9FzZGMLeq6hC6abpZhpJs+AXr/IeBvnKFmro6h2y0/UgMeQ01+XVen04UD1OnrznWb6mxMoHPEP4BMItS9HQCWxk0QAD4AYlHAa+y09ICpXdUxEgw/AIB8CuWSItGOxGPQAqyCdsF1V7UFm4C9sba+j2YTlNTnenBYTR3W8THoTG3ps9231sO45RIAzGyrq4FAs6Jt5VcO9X3mkWZttiEmqUZ6QPlwSA9A986HbiMfyJ/iAPBQcDRQHmwJxJoT6GKrTnQtHEL2xAXMGyRezQe5lSrIt1x6hc7f7x6D4wht3+lQ7TGJzkyBnTXqq6TOZqHrvAa8QCemY/0FTAJki0pgh28t9V7G1K0JRJaHCyh/TcGm3knEaGbFDEDfGUhiAW1Iu6Gfi0rI1MHaJYcruOP2irvjibznIL3Y6yFbW3CLqOGhry/77BoOPGjLb2yop988vCJjRwHUnkFUtFipk1PAhmwu9XPrYCRjbtCWMYNIzjLdfLW+3t+aVEUDNWgjIIBrgwPXAtR32IEOrqUaqmsAZDx3DCnxZ3X+KnU1kHlDD8AVRPQrKBWk16DbLdDTc70xkrHZFeieyxQrswYZgAqkJq/D2jhnSpOwA5phg54ar0aoc9+DwKADqfiNUJ9j+2F952fcL5Wx9iO6rmKQAMohgHCfZs6UBnTfDaoa0NwH/pUPpe69TLOsSfOKjO2GIEUBwN4l0IasoRW+7sENzoC6gtroe8AoCQ1CE2LIBm6vYqKfa0Pb+6KuY84AmioA5/iwq2vhxo6uVX+kzu7GEIhOM7BtFSXQvQ4dsONMv8+BDGh1MZKxvqOZ1/JQ7d0MMsOWgeMD8le2CTYa0jvuFf2YAb+edTR10zad+xFk0ZfADxcCvrdoKfY2uXH7vi7z0nCh3nHdc5Be5DXNd8x3bk7jWSBsJHnxypZGVMtSjc2gBrxKwHzbcYHunqKTTegEgnrs7+S6eNrQzROPAMSXq8HIOoBl2VdeoG6qmYNTT+/leWgDfQS0egagUVelzido774BCrExZA1iUIBPgQL/zk4KM7M6SANkTTUEM0hNzqCkmAG4eQ06eJVNNZD1LQCqZ/qOCHC7dPT+/mz3URl7FIClZ6Bdvh7q52bA5D4BJ6ANe2FVAqC9oo5GCAeM+zIGaZ8ksD4AO1Gv6/PNHtT5eqSijmIMvDHPDfS9/wYAcc8AcNvv6FpICn2fa8hENqGMkkHrdTcAfCWUahzIHJRN7f5ajdQRHa0BjgC+WtaAjB50YY1ONdt2BVgIqfsph8xGLdd90wfhef9E1/QcuvZOIqWKsSkIzpbgWbj6zg0KGAYdejYBPAewuxePANYTHD2g9bORQ7xv0D07AfA6iSWDvJTld9iJvDADbNud15+og/QP/+E/tHe+8523jT3yyCP21FNPfc6/efe7323vec977Nq1a7axsWF/9a/+VXvyySet8nub7cknn7Sf/dmftaeeesriOLbHH3/cfviHf9geeeSR277nV3/1V+0Hf/AH7cMf/rB5nmePPfaY/eIv/qLFMbPmfq5r5+zCglvZqUe6WeKqHvrgQ1gLBBPjoi1jV1v62kpfjeYEOncqphto7arzsgEpZ68F6VHoXggPIcUOfCnW1Q00yfRv6wnIMrT1gIcmB+s2oaQImTWiFxjvjmRsSiSEM+hcdPWdr9rANwWH+aCixn8Nfnc71XKQcxY6fk4VQGlAsDjv69qIAz2wOtAl1vUVe1MB58WH4n+10INo6Ws2pa2+pHlLaNkOdCEAl6elE8gMnQVhWYOTzcyAvNrcuq6FVgXYVGf6zJVM938JzQ2NQNfvK+/XtfBwoX/77EoXUgW6Z0vgSxtWNftXhdL5CHjQPFhvA8BDTgb6uydQxhuONUswBiqHyVV1zALAqIwAyrxYgCi4Qbdgoc73EWQnFjeAsBFA7rYAqQygirCQFAbg0J/ougKmCLMmDBJmhGjMh/C3G9D+kxHgCDYnKCVYBYD0W3CuQKesVe5wmj4fHCQzs5e97GX2H/7Df/js//v+576l973vffbWt77VfuInfsIef/xxe/rpp+1Nb3qTOY5j73rXu8zM7D/9p/9kb37zm+1Lv/RLLc9z+4Ef+AH72q/9WvvEJz7xWQ6QX/3VX7Wv+7qvs+///u+3H/uxHzPf9+1jH/uYuS4sht/nqs+rFt6ixbYKVVsrhcMyTCDFCdH/CGgyGhDlLpYQPvmAX4CsYuZA9AmbbwFioaSRG0O6egEg4+BERUDrkHJOIj24N2Bf9EPNyk2A0boD6eUmZG2SFchGLGAzQ4Yw9PR95F3Aa1TVaNbGuoYywJg9VypJZ+8TAKR/qC1jD4Py+bmaZnzcM0A2OleHYQKdXrvQqZl7Ggk/n+pBFAI9Qw8Ewjahq6jh634bgwzNGdBMgEqwUWXZzGx4qniqha9rsA88Lwbg/LTU9RYBZqjbGMnYDBb6c9CVtAHB0BQIDIdn9ftuXFWCxdhRJ2d4rS1jH57oXK3AxsSQESDwMGVyDiq/K2M1oAiJgcB0DBHrGIAqdSAwXUGbug8Zswy09mwEwUsdHJBc8aSWw1nZgHMAMoTmQioHCG8NGOitC+ekB04JaCJSVcvpgZ7fGfiNEJymHOYUxMMtvWMOoDGBrj9xB8n3fdvZAZAmXB/60IfsK7/yK+0Nb3iDmZldunTJXv/619uHP/zhz37mF37hF277m5/8yZ+0ra0t+43f+A37qq/6KjMz+97v/V77e3/v79lb3/rWz37uzgzT3V7D3oMW3CK9UD0G+QGIgJY9NeIpEPU5c8g+tQCYmmvUAVAla090hY6r6oBMS6UIKKFjpQ18EmMgRPNAHLGEtuYlnCUJZLPCUA9pN4O0O/Alma+HxGig782JYAL3yACNZGgCju34WOe+7CmuZhd4hi6eUQP0pctXydjypfobF4Esr9gGAwRp7aCv7y07BubbDTXCB0t1qFsdfZdnId+fbeoEXi61bJmDUvkG4Ng84GlaAZtuBuzO/nMETzZb3afOkHtAHrnOa7sFYwNwqiGQWgFTddXRv13O9Zmvb6mN6cx1HupQcjqzq3/7iQM9oA6PdF83fMhcl4BbW+hzTIA/zA9Asw2wdv0udC9m2mSQAqaxAiBtB4RQ50D9ke/D/roEUR10GhtwMhkJlB9DW9cYHBqKikGT06DZxICF30B023INbKtr/Y0IKBaCVNdpBvQsCXTALnagwSgDktPq7fdcpkx/cOf1J+4gPfPMM3bmzBmrVCr2mte8xp588km7cEHT+mZmjz/+uP3Mz/yMfeQjH7Ev+7Ivs+eff97+3b/7d/bGN77xc37/ePzpF9ftftqYHR8f24c//GH75m/+Znv88cftueees5e85CX2Qz/0Q/ba1772c35PkiSW3FK7nvxe2jKfl+bcQpaYQaRatPSFYajags0CwoWLRLsSdiFydfYgigFMU3LS1p8t4DDP9TlGOahbQzTRBT26aa5O2Go8krFtALSPAWjpr8BpAofrWg1S3QONdhonQILZ0He0c04P0AfOqkHrtRXA68I8pwDc9mf6vNuAIzihIKul+ykD5tsWrL8xyJ5stdsyFoLul1OosR6BmrwLBnwHqlrnF7DWgL/GBexYAiWAVayL40EoD626EL2b2XgP+IPgOwvgRptBqXZRA4cBAKz5dSAX7EEmEkgDfWDSdxxdNFsAiB3M1MYkY3UYpv5Ixk6Bkbnp6IEcdvU5fOA/mE+03loHjFR0Vd/7NNQAs5ICgSZAdyoDdQRqhJvcgC5baF4BGi+WGDiEJgOQW8pitakNEI1egvZk4UHXY1UTBxfO6Rp6tHdJvw86K/f2da0tAe/aAXby5ku/QsYST9fL81d+Q8Zml2+fgyIv7spBcsqyvEtGgD/86/3vf7/NZjN75JFH7ODgwN75znfa3t6effzjH7dGgwWQfvRHf9Te8pa3WFmWlue5fcd3fIe95z3vwc8WRWHf8A3fYKPRyD74wQ+amdmv/dqv2Wte8xrrdrv2T//pP7XHHnvM/uW//Jf2z/7ZP7OPf/zj9tBDD+F3EV7KzOwv/v1vvy2D1HA0DW3Q9lqHstvaQPcGAIqNLTAE13S+Moj4Fj09tNJDYFUGhuzqXB2VIWSQ1nBYxi7omaQgFtrT303AQFaf0zmFhgarglJ0B9K399U0c1AHwGPcUyOydoA2YK5R9MJRR6pbAhcMEKedzDULdDHUQ2INQsagymAZMDQvq2r8G6uXy9gk1pLYhgetySM1wmvI7mwBGSpsBVsAg7QLWddKTddpBh1ExMbcqen7nYFAsZnZEMSbe6ZzUwN82z6UklbXIRMBVAlWV6ckASenD2WepQskiSc6X3tAi2Cx2rY01u8bQAa07KvjuFeDjCU4ZpQJn0HguHT1d+MJMEanaihK2HNVKMUvUl2DhweAZQEm6HgKmfCWOi/zHISpfZ0/t66/274CemXn9J7zCmSfnlebcOlhnfw/86g2HQwh6fX0M2qzEiC8tSrQs9QB4jECihXIOCaF7o80uX2vl3lhww9dt/F4bE1SV/+960/UQbrzGo1GdvHiRXvXu95l3/qt3yr//oEPfMBe97rX2T/6R//IvvzLv9yeffZZ++7v/m779m//dnv7298un/87f+fv2Pvf/3774Ac/aOfOfRr78Jky3fd///fbP/7H//izn33FK15h/91/99/Zk08+ifdGGaTz58/b33zHt1tYuWkkJpAEiiDtmUGU4N5ZJzXGWEwKXdxQHbERUMGuoctuDG3vWaxs3etEn2MGHVIxiAWugXSt6Wqa4Etquhz9OgirVoFbaq0plUZNDW4/1TkdXlbQp7sJh3kEjhSUKKMECC+htX4CSVwXSntbgToCdTjsjqFsVIODPJjp/JGsTQHt2TF0iSyX0D0DZdVqBbS7YK2tlrqgO6C71N1Vx+AaAHPXQINRm8Kh2IMD37siY2Zm87ECo+dNze66IG2RrYHOItdnPh3rIVOBzEEXugifmujaIl50H4CEz0LZc7cCTOsjIAjcJayiHvB9YP6LViMZS3PKFgFmqFSH4XSq78ibQwAHlBnOoQbL/UQB98O5zksGnbf9tb6P1QLKt01dg+oymXnU6ZWqwzWDMuMmdEc2gE6lhOAvA9qayRqyqSFQSgD/1w3olFusISM9Age4ofN335bec/WOYCHPcvvPP/+rv6+D9CdeYrv1arfb9vDDD9uzzwJwzcze/va32xvf+Eb7tm/7NjMze/nLX27z+dz+1t/6W/aDP/iDt4Gs/+7f/bv28z//8/Yrv/Irn3WOzMx2dz99wL30pbcDhB999FG7dk2BhJ+5oiiyKFKj0Yl3LLqlXfWsjeQz/Uhf7BG0qXsOZE/ubE80s2JTD1AvAoK1pdaK/US/bxcikWXr1TI2XOhCHk70cItbaiDDUO9vFenz/pepHhyvivSQuL9Qw9fYBeI+0APagjKP7Wr56whIzfp7+rxzkNk4CyZt1VHj0DpVY9iagEMNQo2nuT7vFuKBVdRysaF/W4Dj6AJVxBE42SvQlDoHIO0BMF9vLQH/5UFXEYgRr/pAiXBGn8OZjmRsH8pX167rPW86nFXurjWjEoAG3wmInnoJUAzMdJ0/0ntYxgpwLJ66DAKiwJ3zYcjg9QqQ/AHCwQVwcXXa2mix8gHzlrdl7KJBwAU8OdNT3a/zXT26KofK6r+9o7+bAFSgAbJMJx3FvG0WWp57mavBwdjTZzuFmg4Fxeu+urHXhuqY5WA/CzhXbA2O7UU1FPU6kIju63MkiQbUUaEBZnNDnZw5rL/oBMhoAftYNvVzMWhZTkBT8k7ll3UKZwBc/69ykGazmT333HOfE1O0WCyk08zzPv2gn0mElWVp3/Vd32U/93M/Zx/4wAfsvvtuP/guXbpkZ86csU996vZI7+mnn7a/8Bf+wgu+573iyMJb2mlniTolYamHQu/lAJYGqFKe60KOB5ABAfwCaMtiu/0SOGcKkKLY9hXLsnsnRamZDWJQ5AYwYjyEskdNn+1T0I55ZaW52seLL5Kxi9Dh6vc0UzKDTp5GAQfCRT2cyomSwv32WJ2S0XO6KR+KQHj0pZpyrkFGCgTIbQ01fq8NLfgrwEOAplQZt2WsBxkHZwnA1xIyk3Vd5M/21QxlJxrNuo6uteFED44NkHlYgeM42tY1GR2qwaXmCTOzeRcMNtBo1J/VA28GWdE00nlNwYkbg67Zw1DqLnLthvRAX2UFjPZtVxcXxD02hAx3CZ1FFwD3F0BmbQlO/za8pzHIBYVwgK7rOlcVoE6pNPT+tvpKiLrK9ftWAHhuQMOIWwCoH/ZScFbfR+Osfi4/0bV2COz6jVDXmgOSOvvHGvxB3G0ZaFkmK83ynZzoO5oAzullHT1rkrN6f8mplvY2Kro2io7Oy42j0e2fAewnXX+iDtJb3vIWe+KJJ+zixYu2v79v73jHO8zzPHv9619vZmZ/42/8DTt79uxny15PPPGEvetd77Iv/uIv/myJ7e1vf7s98cQTn3WU3vzmN9v73vc++zf/5t9Yo9Gww8NPH8KtVsviODbHcex/+B/+B3vHO95hr3zlK+2xxx6zn/qpn7KnnnrK/tW/+lcv+BnicNfCW0oi3UvAwQL6OKWr2ahxSyOC2Qoo18FDPlvRQ/Wkrh7SpKmGtD7WQ2bWaMsYdYn1si+WMW+mILkhZK7GoQIA0wVwQa30gImhBPP/QCZn/wE9GR8b6uZr1gFcG2hU6eaXZGzd1Xt5Wf0V+rc7wLh7qPXyVV8zdS2DrNxM39v2A3DIjjU7Zm3AogG43t+HTsMZKLMDZrCcEyu6vsshSE4ULci2gU6XB1I3Y1edRKcNzOtz8JrgkE0WJDBhduJCI0NDs03d+kjGCtO9DjGJ1a/pXG91dC3s19QZbUDZ82Fgc7+x0Hey3gb8UgVERaHcX4MurAAwNKPNtozFoQYW+UQzXJ0QWNDhddYqoP1V6mEe5BokTrtqizaAQCiHbOBxANQuFIhC9/E5Rx8kNM3KHZ7XtfoAMPgvrwC9CJSwxkCqm0X6uRwIVl0g6U07+o7OJlodcsHO1oBk0nE1KM6ALykASMEDd3CH5YVn3Hpx+/Un6iDduHHDXv/611u/37fNzU177Wtfa7/2a79mm5ufTutfu3bttozR2972NnMcx972trfZ3t6ebW5u2hNPPGE/9EM/9NnPfAaw/dVf/dW3/dZ73/tee9Ob3mRmZt/zPd9jq9XKvvd7v9cGg4G98pWvtF/6pV+yBx7QFO3vdz27OTX/lvLHo4Ear0ZbpzkFIb+HB1AuAI6iDDZGdAT4pVwN+xzwC3XIEsQpRPUbujGClR5uwUwjL3eoxrUyGsnYdAGbGRyQBByGRlcPwecPdA6GK2BzPq/39wpgyG1u6UFEPChVD8oWQChnO/ocRwC0/uVTjXBDT9fLY0dg5Jb6HIWnc7qEduUVZDpmxOHzvJanSwClBxdBEgdgGHXAOZwAG3YMpKkVEOwlxt3aCPTygNBw2mYaEjfQ974c6LyCxrN14HeiDPQAL0J2gliQxyBqCw0Kl32957j7mIyFG3ogrwHI3DzSwygELp4pcMzFcH81kH9ZA5HgOtL9tbmrcz+bAVi6OpKxANrJNpYgLptoFr0EjrIceLx8R52h7Jpm20Dv3CogEB0D6WoC3a7nfA2Um6V+bgHZ+0NX32UEpcIkAtA34E4HuXaKV471vfWaeu5BbGWHECj7U91bozuyStkKUmNw/b8KpP35dE0mE2u1WvZ9//L/Y1H1psW61lMHpAddMF0fCPimegCUI/0+qmUv2nqAtiCrdAqpxSBVJ6Lw9G+TGHRvMgLY6pLaWKjxT+CAynw1IuPnQIgX2Mn3KnoShaCxRlLOdQAtVoAX5FHA7jy0rb/bSKA9GzSbWrA2ohRS8Ut9R5eBZflaXz+XRwCaT0Yy9rFTHet56hS/5Hxbxlrg8JfALTMHQUwHuryWULZIwIHrAJ5kmcPaBa6VSqnOwmSszzsDp93MrLatJ1lloaVGdwTltLZ+X6Wt770JXay5p/ezhoaMZQlM9aCxFt8PGR+gHq8u9Dm2gck8XQN8AADUNdAGWQNmKIUsQb4G2aO6/q0fKDbGqekenuRAJTAGfigAVQew3hZA42BVzYDMB/Ac0MhQHbdlzM11z+0BvKEDjZCbFcggAfXHsqIB1wrOnw7skRrcy/BU34cPpKSLGlBUgIJNOAfme6BTyO9w/larzP7JD/yrzy+Q9ufjlVUzc2/pqDo3VKN7+rwaUq+jqfg2qLgvoKX6LKg9r4BEy4N0cAW6P+Zr3QQJrIwgB9KwGUhvBGowAhABLVu60+JDENO9CGDOY91o2xDaVFt6L8ST40T6jkpwSp7x1CG8caiOwEMd/d0L0K98uAbRWIOMDzjes8lIxlpdNSxV4HiZ+Oo0fcPuYzK2qmsZz52rAapCpsmb6u+GVZ0D904EpZlBA6EdZRpAnKxBPw7IFRPTCPeo39b7g7n6oipIOpjZGHAcJajWWwfwFB60+Z/ofR9BptSDjrW4A2WPhtqT9gzYun1d59NMMyUDX0HLh9CZ2YYW9yaw0hddfXctR/f68k6ZCDPLoAmiN1DntB9o2sGPdC/54ND4ETRLBJAOdLUseAqZnNj0HTU7sJcAZz2u6LvsEfvEZShDtzQDWoCNblX0Xlor/d28oVigBBzRk77eS2UHMLrgcKVd4A7b0XfUHKgtD0Hi6OgOmZKUFBHguucgvcirsU6scst7uwGlqchT19eZ6thopTIMbk8NexCTTo0a5hK6RM434OTZ0N+YJIpbATFvS4F2frSAzgzoVPKXwAa4o58DSgyLdxWQvXUEpHUzNf6nDpTsgNW3DVF5qP6R5RM1GJ9s63McRIBpSPRvtwHrEdwHJRQQtazM1Qh722qUFrE+22gNJVRgcidm+H3QP4o64PgAJ1gBjvL1qTp6QBZtjqeGbroA7N0NPTw7EDlWANRbXcKhaGYt4Ao6jLQ0EEMDQI0cQNPsxA4Iea6OgWst1mzCLnR6jkDWxSoK4q86WjL1oOX7wlldW6NUs0U3euqobMNj7NfU2ViD9lwIuoFPAW5t2QKMVB8OR19tkQPdnwWUBbdWbf0cyJQ4VRC1hjWYtEAKCbBoGejl+ffps3UaYI/BiZiG4NgGaitdKLHvuG0ZazWAYmWl76M1hprirj5bu6ULpoC9mQC/1pk7KicrYheH656D9CKvy9nAwltSeiG0MSbQUuhvAvM1SHlcgO4Ui4EFFcCXG3VQYu6rMVwTtgNwNTlE4UGkmz4Blr/ifkh7guOTHkCb9EwN5Dk49CegYeT4+rubUwDIn+jBMQWh1rMXIUIDxtjsmn7fYKXvIwXOmBSoHe4D3qKYSPVIwqamB9YO0FEURMgHa7djul4wWwlCpkcw96KTZGbxSOeqUlWjdgTUE0Ffo+OH4HBamEa9OdS+9gAQbGYWwEFRAMA2AyAuNcZVfMBdAXN7GqpTfQZoNJagB9gKR3p/a9CF9PQ9rSGD9ylfnYgY9IK2oev0GAIQb66OXifUY2oE3pUPsjONsdqJAGhD6hD9FZCV98GJXULjSwG4KXeqv+GW0JQC+mcuyNVk4Mxvg3RAHeg7DOgAfAi8c+Ba2pnr2CgE9QtoTmrNoInkEZ2/1kQ3yHiqCYTaGQheIOtld5DCOhB40HXPQXqRV71oWXiLmGIIYLXJEsj79hTNHwPuZ7zUg7EW6yatL/T7Zp5ulnWmr7yoPiVjQV1T2B6k09enmnkJQo2KVifQVg7f51X1nrtTjT7HfahRA4/HV9ShE+2lujm6rvKbzPbUGRoA2LcCbLinm2oInjnRzTxMoCwAKuKXn9N5eUVD536jAEoJB9hLAZ8SpepsbEBX5gwMbuNQjaE/BSkJmD9/BQc0dKftwzoIsrb+KbT5eonOVeNU17i/1rmf1KG0bGYhAGdrYHg/CWSFZ1r6nctSy5lRCkzhF+DQD/XwWDvAV1VVSorWEIDRVX2fLtAgJ6BGX7o6L8vWSMbaGXCZ2RUZG2egUZcBuNnXtVr39J6jQPfIEPQeV0DYCvKRFm/oc6xDIDWEBoWIMDRH+rthE/ZwqI73GtbfGPjcfHBeVkCxUEbqjE+oCrECNvGh/q63oRPoL6EiEmvD1Fam63kyVIfaAdzUcusODBJ1TsB1z0F6kZc3W5l/CytucqrGeXel05yfhS4x0IHKZrox3CGUgyq6uCNI86bQFRf1dUEdZ9BxsQL9M9iQEYAb3bPqINV8JZnbOgOaaFDmGdcABL2vWbk1cUY9r5u5qOkm7ULZMpiqwWhvqbHehcan+zd0To+h+/CpQ8V6zCEougFK77OBPls91bnaBv6QJTRxrodqlJKl/u0KZF4yXzM06xPNTPTbIGAJnWgupOzjSVs/tyYxUsW2LUHV3S2BlyrkLrYVtLes+noA1KsAlnZ1/08qgKvxQTut1Gh9HcF+gCB5OVcnrIDsRAsyIMT3kwPOJEw1Q7MFzSEu3N9J85L+BmQ7KxBcVUGrbwVlRr8ALMumrukONA+EHXA6fa279yC7k9OhDGXyZR2yRTW1HVGuG7YA/KIXgvFoQ+n8FAhNwXEsQCdtBffsQWm0aOpeql3W34hAIqaAGDucA8Y00bmqp7c7xcsFBz13XvccpBd55X7D3FsWQjnTxTOHDbk81gPAO1DjmhbqvFy/pot7sqV/G0Ppgrobo7oam3MgUrrb0A256OhCXp4fydiDc7WGJ8EnZOyTl9UAzYa6MzJot63M1Rhubus8339BAfJ5rPNccZ+RsZf29P0eQPt+AZ0UETgCrbZmHF+d6ueuNzUSXg01u7Aeq8G4fKr3/Pz2FRmrPaPEguddjbadC8DaDJxH4RT0BkH6ZXOsZqi/DR2OIxDTrIBC+lqDhZqnWarMg+wnZERGJWux1UEbKin0kHbOqmNx8hRwcVXV4+qf0bVwDsqZVtF9cwR4r06iGaRsqkHTHEDLK3AoWz3o+oMS+wSSBD40jIA+tIXEepzoOt8G52Cjo+s3b+izVefq5GTQTRaA/mERAC5pDvQspa7fFmTMh3WAAED5tQHH97oE++TCmgRh7zbI8RTQRLIFItTPArdcI9f7q44AAxeDkoOvdjGBLsUWSA0VKz0H3MYdTvv8bqRq7zlIL/rajCpWuUWCZLXUVPcN6ESrFLr5khrgF6Ar6Rzgah6KH5OxKrzeGslsnAI+AOrl8za0/iYAjARuj9+Gjpou4CtaDUhNe3rgVaHNdwqM21sBCMSC0+TEwJd0Q//2Ywv93Y0EHAGQxXDKkYxtjvXAOr2kv3E/aB0NO1CnHygmpCTVdF+Nde2qHhL7W9CGvK+/69G6ykDvCUqPM6BYaEOmsx4DcKer73ICc5DWdV9GoI01WKmjAckKMzNLgFx0AlQEBjyTHrDcR9AdtAV0+GkVDvM9nYfOtv7wCgDxa2B6cUCY2gIoaw2BDLSuGaRNaIzwa+rAbQA3WhWIcRPAQ5XAl9YAZvPDRN/xHMgtY1AJ6IOzBokmO0wABqFVfLsOPE2hqwD5SqFrI4OsYVJoFjIc67OlgOvKodxHOoK20v1V7wB+aaXPNm2C0+7pxFSALNcjhxDGCDvmpLc3HZXQrUrXPQfpRV5Hh3sW3qJovarrbqkAqZl/qAYtqml0ArJrFvs6eAzefzdT45WCZtYo1k0VAJPul5z7Whl77D4tk9F1aB+Vsd89/F0ZW691cd93/8tk7PHgq+/qd+/2ggY9+7FUGcGt0C3zybU6ZucWepg84OshvdzRtbGxUoORBFDegNbfKUTWY9DkaoKeWlqDluhSy2QbkKnzgYXXuoBLgGxWGxiyJ0OQdHFBOHPxWzLWSnR/pMBmPZurE7uZamZyVgDmwszy+/U7e0D5sL/WA+V8RddHs6OR/iLTkljT2jJWvwjcPkP9vj5gcirAcr0KQE4GeLzyRJ/tHDgqcUvf3WSsWT2vC2LQflvHWrqOUggEDqHTcDSDdvuW2sDmCISka7rnKiAX5MG+Tga6NmqXdO7Thc5Bs6mB8nwJ2pNAYRA7Glj0HQCMu/reroGHcACyUXasz3sWND7PgNB1ARikOmTbKoWej6sERMYjEJxu3F52y4Ftn657DtKLvC41z1olvrnwTyLtrpif6oGXO1q+GYHUw4kBBmkANdu+boLzbSD5akMqtKcb7UHo3DlIfkfGdiCdvmMqIbJjj+nYjo79SV3AcW1vffRLZOzfF0pQ/5EPfUjGjq+3ZewyiB3HUz0AzQPK2Loagnasn+t09J3HIKNQg3bqVlMdldlEnaFFoIZvN4DS8kDXkJtotmIS6eHkBIrDCkI1wu4aSoDwvD4ouLegcpaazosHmU4zs8lY9+sBYNle4ihlRnBWnS5qPK4OdP5bgJextUbcLhxkLcg++1O1HYuGzvWZAy2tTM8A+AzIRZcDdSzcpgYMzQIyltBt2ISa3RJK3cczIMatq/10fZ2/flvfUQW62Fxg4W7EOvftKvAHjT8pYy1P38dsqY57CF3K3gKkd3qAr4R3mRMvFZSiHMjKJTAHzxa6wXaBQb7lQkMANBgNYdn7qWKa1r7a1OwO4fZsdS+D9MdyxdWZxdWbG6610A15BtqdD5vq6QfgbHSBSbcCGzf01fFZzhWXFAJb71lISWaghbMGDrxfP/w1GXtFRxfohei/0T/+f/n1fx/+lIw9BxIdC+AP6Vf0nV/YUocmelRxXfW5riEP6PMXUNYqp4B8BaqIKmQ6+0T6CaXCZqIO/yjRdToC5PYm9LfXTUsKQQadRhD1GmAVqrnO/Xiu7ygC/bhlokbdSbnGtuzqHr4velTGuuBsANWVnWzo55ou8PiEOgZYeosMytoBBGEzzTR1XNAIrGvJbgJQgdZK3+espqebN9N5JSLRGkjlpBW95/FC11YOh3mt0LVwdqm/u4TfXahPZ+ZqU4C31vvbBSKvEXST9XNd+3FfncTxUn+jgO7UlgG1BqgJrIDjbRd4mnJ4b0PAtgbg8ruwUBeg43YIuN0dKNktO5oRDWE9b3q3H14LWBd03XOQXuRVD3yLg5vT6AGOwIWU7v1dzSA9s6+L4mSq9fxwD3AE4HG3d/UA6I6B/BC4Lpo7UN/eB8JBKMD/ItS3u9V3y9jDTb3nl5/7dhnjS41hAmW863v6HMMpbKoMTqyWGrT/BqRfJl19jn1glh6fqMFdDSB6B4kYp6K/kQBIuw48I1vQ/nw60XdeZEAol+lvlCBr4wJu2IcybTLTw32xbMtYCFpsjbV+3wCAyM/N1RH1gIV3DBgTwruMfdZt6oCu4U5FMwI5OEhrX+/xPjoACr3HY3CgfSLqrAI+aHRd7wXWrw8HfF7oQbsJz5ZONWNW3dd92KnQGoRGFZCJ8aAs4wBDfq+mGbhxFbJFS+AyC9UhbE7VcZyA9lz9QN/v1Y6u6RKA4IShGZJu3SEAmQGsX6yA0HTRlrE64PnKRNfarAsUIZc1C9wg1YYc1imUbnsAfUpH0I1HTPMgL5Xb7e88d6C5A657DtKLvLxibP4t/YfuIVCfA0pzPlajdM7T0sUJRB3Ti8CJs6X12fNk7NXuWXwA0diz0GraUcPS7OuGrF3Q54DmIHt+oE7i/3P0XTKWHkCrMxwcrToIU0Ib6CbU2ueBOrbdkRqlvaaWsC7dD+DGpwHX4T8tYzmAtFcO6Kn1QY/uUI2wC1Tf0DFr1RDy1ZTp2NdnOyraMhYBn1MHDG4OnSg5OD6B6YI5hs6gCAKDTk3XSx3aegceOGvZwzLWrHI6Pr9f53oGJIndY30BORzwQwikkqo6JVVotx+DCG1ZwCkDRIKrHOYV6DFWgPs5OtJ90zkCCACAcw9XIxlb95W3zAO7GLZBJBtK0zPoimtmui5XQEabgHxLNNfPbRy2ZWxvoI7FBsAlooba7Tp0FZvpO3K21G5nDmBbN3QSdqZQLoXy8qqlKbP6EDLXD4FQMJSga11d9zWQb6kYdLHmgEFq69+ufQ2AR3fgyZbZvRLbH8s1H3atWN3cNHUPFhl4B36q2Ziaq8bB39DUapZrtDM4gIzPRBcBOVxJoYbeBZK0elsX90PnvlzGXvnyL9O/dbW13kwP1f/y3P9Pxk6XwOo9BKr8E40wBqVultOeHsiXKroVbjhqWKpzLR/s/SoASy+B5ESq8iiXidphpe+3UQO5hVfqWgM7ajbT70uO1TmIoQPr5Iwa9eAQnFNgBE+g3OwN9KBsw144gRKA0ySnAgwdcPM8BRIb20BA6O3o++1DmcvMLLuqB/IQGICvzYCjDGpsLuyHxql+3wqwZ5nX1s9BJmIB4r7NmuI44q6+4zzW+bo40z2XnYEMck2/LwJNv2AX3slZDUoKYDwHM2bLij5vAHvpUkfX2xT2q5PAeuuqI/Uo6ELmgF9yQq0Q3JhAOTK4JGOnQGeRArP8fechOoXO21Wk62qR6Pw5ADOowBrqVD8lY9UVYGpL0AKF7CyVpScABWmc6Nj2HSLZ8+Reie2P5RpmS1vekoIfuurl7gIuZAqcJwPgsHDAk66SACO0gc8vAGvxHCK5YzhVc+giaGi248v/wjfrb+i3fY5LsSxbW18hY7/+X/6FjBUgXZBOQc4kVIO7C12Azw4UfH3xJTpX5UAzUlnxnIwNL2uZoXlWsVk7LXUOnh5rxDw0tf4XboBOUgs4d6BVd7jWQ2ILeOxiiKxjT+dlPlIOJQcO/A7QC4xiPSScYCRjPvB6TQB7s67o+43bGm0H0Mp+AVizjwB7Z2bWOdLPJoBLigDknQMprA+ElLmvDPmrK/pOzpJkO2CVDFjG85U6suNQF8N9cM56FyEbM7wiYzX/koz521r+WgLJ5HIG4texZmPrkFlzaorxa4X6vDEEhKuq7q+gpXY7AwHrcU0DwrSp9ukqSH7U4bzYjYCMtqGB46zQ73MOgO/LAaqIqf7tRlczSIeOviOD7GJjonM/hGXqz7U814Ln6C/VfqZAwjxuAI1Levt7W8D303XPQXqRV1KtmHNLF1t/X52N/VwPlATwD0FbO+CyVFPY4VSzSgcApktB1+gAOgsefkA9+E1PjcgDj/wZGfvlpz4oY34bqA4CvRcfnL/DtR469Ud1AwE23IK2joVLzdCsDwEg39VD7OljPSQu+Tp/LV8PNs/Vd1m7oof0GdCyug4tqBHQ9p+2dW0sDvUwOQbD4gO5XQHdTK1T4JY5p/fXAwHbmqlR76f6HNW1/m0SqCO1mqu5OgMAz8ugpO6tdV52OsDQXgHjX7IWW94DosjOJRlbjHSdr+s6h2eAqiMMNIvRBt2wJRxarabO13KiB1661O87U+o7CVJd+14BIH73S2WsHULWOwW2ftiH7Yrajmilh+Cwrc/boIaHNgghQ7aTTsd1DealAlxGATmnxLEFpT2QiCkr+n1TOOSzVDOgyXnodoPMSwRl7SzQd34GynhZoNmiQa6BfA3UGMpSndMJiC+vQAg62tfvq7W04291RwNUkt0Tq/1jufaTuYW3IFQX4EmHdT0UdoHS2jE43KB+3K/rZqkAjf0c+Ft2E93MzkAjkbQGYrCXf1HGovs0q9QFwGh7S8uHL+09LmN0zba0A+4nkv9Nxp55VnmVGolGMWsw6lXITtQn4Ozua6ZusaljtbaODUBMdxRrxvEiMCVfz0C8daDfNwEhzh5wadVn0ObbaMvYfAkH4CEIUm3qYTcCcruagdzKSoOF0vR9hNA4MJupI3om1Oi93ADg5j40QEAmNoCmAzOzXk+zhLUTfb4q6KQ1A/3Ow6W+z+FYX167AjhHEPJNM93X7Q0dm9fhOaCZYwpCtysIDnpACjkHcH4TtFpaCxDnPQf7FZzqzQDwVWd0HTUreoDGPQ1Uao7+RghRGGCRbQ5zv5rqXNWXuq+hCm3Zdb2/yyPdN7FBcAWl1uqmvqMwAnsCHYmtQrP31R1dpz3AieWgZ7SCoPMQ2FkjkJyqXAK+tAlQRfi3ZytLH8BpcN1zkF7k1dyfW3RLh4F7VhdK4elGu9HUsRi4JOxYnY16DVLigMTNTA8yL1fvfwmg1hnQBjw3BQbV39KFvLEeyVgaavYkrb1fxg6hXTyFDXnfVKN6v6slu2ugQ9TdBTVvYNK9L1OCyhycoQWUKEoAllZHUObpA85pRx2LEbyjy2q7bOMlen/V2UjG1u22jB2MrsiYB4D2RaTObmY6dmOs5SEPMENBXddkCE5sfAqipckrZeyxmgYBXa322eQCgLmBSLUPZRAzswPoSqwDiB+Sa2ZAA1EJNSjpQLdmCS3feaY/kgIQP4LmENISOwUx3Uamjl6v0EaLWghyJi3NhM1B6sUCfbaKp3bRcaHjr6HOxhpYpDMXutPW6rwEPjQt9KGjLtD14Wa6r11oaAkqaqN3oGe+DpicM5H+xgJ4rpYAmk9WAHLfULtTByzgXqZ/24USZa0JhK0lZPQXoHnn6/OehoBL9KGzsgtyVXfgSYvyHgbpj+V66aXzFt9SYktg4usdXSinV3RRPJ/qoRpU9YA/OQF8ELSBpo5u+lNiWoUIowZCtx3TlOlkpQb8ylrLfXsLjax7INS4qY9mxUqfYw78NxYSeFg/Fzg69ykgAD91op/bgOxOBlnD4UAPuyjVjUuY6sVY73k9hsOupcbh6ACYanOdv0eWei8XLiorejHTg/wUSh7n1xqVP7YLTM4TwBHUQZ0eDrGZAagXOIrakJ298ZA63l6gmaY1ZCbOAUmnmZkbqlM4n6nznQAmal3qvAZAtrkABfOsCwLRwDmVJcCxE+redAF7VoFstg9kivO6kqmuoXTugXwGJJ8sgPRJAt24zctwwANVRw7l/vWzum+yJuxXOB1PAmg1n6pdnIOKgWtKsQCcq1Z6es/XFvC7QLvgXNI9UoQ69x1H7zmMwREFrqUHIZD3HbXl1Y7eSwJlvBnQx/gV/b5ZT5320ad0AmdNtZX1+263qe70noP0x3K1N+dWvSW9vfQVeFg3NZovPasL/lFPI7SjpXZILU81k5ODt16ZaLQYXRjJ2OlaU+zpUqNKki6xGOrR0G47AjzKZgs0gmDdNgF8uV7osx3EuvkaO20ZI3jARqmlmiGoUfuOfq4EKYQa4D8mAIzueGqArsZqHPZGcKBCR+KFUv92tw4RX0dxXe5cjU0y0OdogdTFKtJD8ShTJ6eoqgPyqcUV/V0ACXslZDWAmTtqqWGu+nrgVyBz0gIQ+XgBMipm1oTmhqAKXGHgsAU14NuAIGIHzsU8gg7Opc7/BNTPG3Uoj0bAuwW4ySLVdeSvNUvoevq5DByuGtAfJAFghsZqFFZtyOotwdE+go6/bV1be56+ywnoPQbAu9UH3FQJtBLuNdCjiyHDBXQWwJJgbWDrTkkb5KUv0d/o6zw3FlDWDsGZ3AT+MPhcqcvZUlP76cS6hhxPbVvnir7f89s6McdrXePt0e3e+BQY1um65yC9yKvVmFvtFuBeIwKYPgBTFzFwWAC/hHnq/TsbunGjRH9jCE5TMgAV5/xj+n25diUVYPhmJSw0MKRnQj18S2CqDXY0PZ9MdRP4EN6dga6kxXNQogSwpAtYoCaUQTxXMyVupPe3gMOpBdRDUyi/vKTUZ9v5Cp2XRV/n9BLU7v2z+sOLhZYtJ66G9E6hGZ/eCAD319Won27r+3geZBQWfTWuaUeDip0z6hgM23ovJ5kexmtgaO5CpiPJtHS7tWAtthQOQS+l7kD9zv4YtMQiwD9ByW9W1WxYtdC1Wk3h+05AsR3A12vI0F7PRjI2AobyTcAgbj+g69IznYPBVN9dDDQQ/ksBL9MHgH1D788f6QGaD/U0T7qv1u9zdc/1runvEpP+qq2/sT3UNTQEId7ggbZ+DpjvvUv6ORf0LVshYNHA0bNC9+uZJWSpTtS5Ou5pxiyvQyC/BKJiCE77gdbJqZDQ9CCw3bh9Tn2oNtB1z0F6kddW8KjVw5uGe7nWN0YMtNTuCJlV27mqB/d1D4T2Vrr5AmBQdkGmJIJOjxzq/ouGGrSWo5vZdYC3KNTPbUCn0ikYeg+ics/T8kbkgd5Wpo5jWagRdoHjJVrrpq85UEKNRjJWgXLQGuwPaTutwPm7AZIzyUyNV3hOcVMhZAPWM83ubM7UGRoA23EfwJeDQJ/j2lANZAHcNzvgSC093SBrENitRPq7Y+At8iq6nk+PdF96AHI/grjFzKxd1+xup6Upn6sL9YKJB60FenGLGORH9vQ3DqBj9fpaW+FXkIqoHAFTuA8EfBXo5gMZm8lIdfQOQSrnQgUcKWg/Xz2iNjD4mL67VQeCnOUV/du6fq5oakY13VOnfwodkt0WkB+6iudxQKh1CXxT94302UYTDV7CUIOc2kCdPxecxKkDJXvgzEhGejb8DsTEdcDU1oEoMhxDssBty9iwrfdyDrBPJLfkAPP96vD2+5vdyyD98VyTwdjWtwCLQ+j+yqDuHx6pkauBTlIBuAS/owejzSCSq2m048BidBqvkLEcotkLIEUxB8dnUdHf6OZqCFKgwN8F/NJBomXL7Q1wQFyg2Xd0Xkqg4y9B2HMX8EYuqJwnPchsAD9PdaLPewrftwU0BL1AN/06VGM9PwBGZZAGWEPZx0x1xPxMy7lDyBAeTMHhqup7c0HpfbQATE3clrGkomvDUmKQ1oMjuwb8RD3Npl4r9V6Ikd7MrAZyKFcuq0ldAC3C1lQdGg86906hi62+o/f9HLAknoDcTQ0kRA6gJNkYqbPmFOp4Og9CQAOg29ZCHd4ZAPC8hwAPOQcBcBB+LRJ1LE7A0V6egFRTCN2aQE1QhZb+BHR2INFknTPqhFWgaWGa6F6qLnWtJbCF+0DjMgNs68FY94gLwYvb1WDSW+n3nU1B6xCkbjyQLlrEei9bS3WkZqDtFhZqt2sggnwjvr1KMnMgyQDXPQfpRV6X86VVb8HmnM4BNAopyRA6FXJY8Oc60M46UyMy7+qmykLdLKuxLtBKrE6dkyhxYgYAu7qnnkB3pM8734X2UyA/gz1qvqdGOANQerX+chlbrPVvXSCUbEL0NN0AYG+o0Sd0whtkv+1qTQfPDdU4XK5B1gtqdt2mOiCLY/2+8VqzbZWWGv/DTHXE9kCqxQUHKa6rKUkgmxpNAGPigGEGYsztI/3dZVsPxbqrjl4Imb/A03mug+ZdGzIdZmapq4fbxY6umaMxkG22dW9WUz3MayAXsk51rl8S67y+Bt7JZVMncwLYLtfV9z7p6zs5eAqkULZ1vqa52qcFsNe/BrpsXVfLyxbrc/iwv5wpZMIhs76GjrA1dHqFsG8yCByXgCNMjkGLERpVrAaB8lzvpQKA8RgCLsvVzloLSHChzb9IdGwNWdvlZf2+q8B8H8L9uYnCFg5Bo86DTuMSMsP1pTZVhHu3v/Nw8TnSwndc9xykF3lNB7uWL2++zAKUjudNXcgz0FjKBnq45SDu5wDTagmstAYRXx3EG6l0EU5B+yfQRZVeBHKxjkYYeaSRa7zShdxpKaDQXarxPwAgngWa7ehAZOiOdA6CBjh/qTqT81IP1e0LahyePtGxrbHOXxI+ImN+qp+rB/o+gKHfiqYa+qVzRcbiSA1uu6LR2A5wZJkPJdlZW8YiX2+w9zB0JEFHSbmp76gKZR8PeJXiY8XP1c/pHiygJfo40L11AsBtM7N6ruaz0hzpPUIGqjXTPQcVWKsAP9ewAzpfhYKR44U6868CTMl+E7As5VMyVs5A+BVKbKM+HLRNtW1rJaC3BRD4re7XsWFf1+/GHLpna2oD0zpoxU3AtlWAUBI4drZr+rf7QLDoQIC5C6SQ3kzXpX9e174LWe9hpn/bgXJumescLBIilARW6ing50D8dQkEqxlglbyH9V52C3Uml9BZ3XD1d6eQJQ3D29f4HKAXdN1zkF7klUUH5lduOjuFAUkaGFIXcAkp6Cm5M90EfgO0uirqrdeBG8UBRtYc6Np3eurV+2DCZ2DQvA60CINKblABYUXw7AsHSnEbwLWiPqdFS92k7S8CYC8EcseJZl6qC3XMTgHnUIf0/PmL4AwBcWec6GFe6ennDqGsmk51/T3u6VzlWwr+j0Djrw1ikPkDQHUAmKuTIy0P7UyAffosyOTA4RT40FGXqENd3VJDf0y6a6bPtpNp6cZd6TowM5uACPXOUu/x7Bkg5Sv0sOxCh9T1hu7X3kr30gQcqRLwMhNgHo9NA5rFTMsUYzjM29BR13hY53Uz0EP6qK14r1lD92Z6Q53gSVXXzMzXQ/URkBqyVNdH5qptOwHqhHQbcESA4Y+AZHJc6LwcXAJcF5BM9oDCoAb8S41EnUQPsu3TBTT61CAzFIFCQ6BjJZB5bQBbRAABAABJREFUbgIBbLsFxI5XQTPU1XuJQ7W9JWTREkgqHN8BUVgCZIGuew7Si7zC8KKFt4C058C624g1s+FN9HPbDtSPwRDUmpAeXEGmpE3tpwos7bU10l+aLuQ5yHb0NtoyVs2A4KSqn2vFQJ8PQLy8p8+xBcKURpwnVQUeTxIACIF3tT0YyVgV0rzXGiAHU4WINAD9KCDzCxdqgObHemDV1krS1ysU51C9BBEflBmqkNoPyURABrPdUGM4BTmNbk/nbwiZHCd7TMaiQtdfFkE50tdsSg/uZXKseysDtuhODCyTZtaZgyCpB+XbRA/p6YOajfkEYN7qh/p9MbAWj8CJ8IG2oXoEzgG0modNcAp1GVkT+JxyIFOcQYbW3b0kYxdL0CG8AOXlrtrKi1uKpWwAUeR0rL/7O5k6axFQiQAHqfWBq2ozBZsFuMkB2Nn1WX22Lkh0JNC4Ma3rPly6OrZzCUpxZVuGmmt9tglkbasTtYF9IBst1pB9BhLhMtEArp4BJUIfoBvQRFLecT6WkKCg656D9CKvURHZ6pYutUaoKWw/VGPoV0FNGcpuDQC/LhYjGUtzXXikVxRvA8B7qQtqDeBQt6ufG411CQUBtNEugBOnC51KkMlxoA00BdmJ0DSaqEME6S0A0+CrYYbMOcTaZt2xGtd5B8D6pRqlK+B0ppkalo2uAlV3N9UodaB1PQKD5pquyRmYgxtzkKsA5+oAMkh1wIQ8A2WGBmRd3UI7oQa5/m010XWfgUI69azMQfvMO9QMRmiQmjSzOQwHK9D5A+BsnwhHhzp2AhphBYCWnQXgb7b0nXgN6P4ClvYQyijnIQgrgNbAurr2l5keZAG09PcA02hVEMT9lD7vyTO6l/ZqwPHU0f21bugh3ct0XobVkYzVs7aMBcCkHZrupR343QGUX+cgxxHt6FrbgXXVBRLHAPZNGirutASogNV17iMfMmYpBAZjdbxdKO0Vvs7LHJzxRltteeiBDdy83XbMZ+Dtw3XPQXqRV9ycWFy7+UI8OOHDumZUUmj9z0FEcWZqHLbaUGcGsFo1BZAccBmNm7ohnRjwI6HeS7WuqdAaZGiWxFQ7BqK4mh76EUQdK8AbzAEfsMygRAmhcAjZgPmmZicSANwvAfi6XOj3LdpqmC1TR6UJ3Thhobia8Vy159JCjdcGlGRT0ICaAOniOgI2XBAeHg3UeVll6uQYzLMzhvJwS6P3+lk1V7VIDeSRqQPnQ1wQwevw6romh4BFMzPbgBLHuqVr5vhU12/9shr7kxK6EufQvQjYjmZTnaYFODlOqWurAt1kWVXnetQEBxWOkOxAJ5a+rw52YhxD5soB+o5XgUxJCSSfpT7c6ATs0xQEmHeAT2cPghdPHbODNWTMFprNboIclAHPlXOs9rMBe31+Xsegp8c2OnombQOY+/KOrtMllKtLyNTtQrCQLpX6o+oBHQ3gcUeZOk3DLZ2rXQDzucvb78UFZ5+uew7Si7zOxGetGt/cmN5QX9gMMjRFrNkO71AXQK2tKdhwrh5ycALq1pFmDnYXalwfqagh6EPUHNQuyVj9UO9vBsK08YameZOBOhbruS7c/rY+Wwqt8KGjczoN9eAYBCCiCo1K4VLvuXsIIorQsXK6C0KXLoCvUwDSdyGbVeghezLTw3wAGaQSMlJQybSkr4bvcP3bMjYCh9Vp6/eVwNe1hPIGJJrssWSkg3taOhtuKgtvh/TGIAu53NDfmDbgcAL8h5nZCN77bKH3eARA0mMAGVdBKDcC4dcEKIqP+/q56lTXW7etnxtDN995aM28Dpg8d1cP+DmA2psz3eurC9CJmqvzHQNeaw26hjVw3OubIGeUAX9VrrZjPge5kAKyIiAeXgAmjFAv1ydA9zK8ImN+6ytlbFmqwzU81bnqQYbQvabr76in89cFhuwWwAKGoIpQgjbeHAyFV0LQXjwsY7Wq2rawps97APFM7Q6tuKUHDJNw3ZWD1IVSyH/tchzHfvM3f9MuXlTx0C+0ay/cssotnEFxoMarEUJXQqyLuwjUay5v6IFcQKtkCVFCfalaU0dQbplCua8G+lgnoB5/cD+0kK4g2jnQjrXS143R8XST7n9MMxF9YIcNAFA8TvTZCmD1XgGXSW8Khh6aBXemQNgI5JvBWrdbk0qK4DjOuurkdEDKYxFoZDhKgMkZMHCR6W88fVnnNKpq2aIK5UNojrS8Ae/N1/u7Otf721oBoB06fuhWDJzT6yCjUlI5raDCqtmz13Q/nEwVZ7YHwO0UsiIJSG+0oHS56QBOB7jHvFR/IwQsy6uBB2nW0XnYAAHrNTgvO9vQcdkCDbgBZLgS/b7Y1W68NWRelltA7DoBodYDIMtd69wfrPRemmud+8m+Oly+r4HZOAddPdhz7briDStTEH6OAZcIzSGTpZ41WU8dhG6igWMKDQrPGGgTnlG7nfn63urgtGcgEeOfBWxWqcLUxUhtbw0wYbtnbn9vs+gPEaQ9Go3s3e9+t7VagLi/4yrL0r7zO7/T1uu7u4HP92tZDq0sb0adK0+jwE9C9JokGgGtgfOkexFKZ/Da3LVGNlVQRL7U1HeYu7pAZyAX4PTUKHWBy6ixqwdZBlwXg6UapcMYZDs6GmVljjoH65nenzdUB6m6VCcROn/tImjANaBkN3C1X/lkoOugB513IVAxZIWCghd7aqhKwFcFa5BbAB92P1WDNs10nsfQAeNUdF42EwBkAsdLpa3r7zBXBxjoa6zj6eEUrwE1u1SgegzMufMGkNZ9AsQ0oQPLzGwVaLlguQBG5qoebpWFOp4psLk7ICC670FZG/bwWRAVroO2zTUISmyukcC5XSjBNiH1Wqq2I1GYVBrqzDdAJ216DdTZE91L830F9jahjN+HY+wEMn/HAFBuj/R9HCXahDPK1JmsOYAJXY9kLE/U8VlAt+DsWO3dzv1qn866yg+XAhzhOgCXD3x1roJtXWsuZcJSdRJbF6FTttbW7wN5KW/6CRnzG2pnDRjpTxe3O/xzwPbRddcltte97nW2tcWijXde3/Vd33W3X/t5fw3diUXuzcmubwPwEzzpNvBV1AAbM3d0zqvrK/q3NYhIF2pwhyPgSwFsQb0JSoPAQwNNBNY/UedlI9ZDtQ/K58d93UCtCbSGxno4EdDEB7bpSqKbbxnqHDwLGlruSI3wvAVivwQ2vaLP8TyU4qzUaDsK1QnLJvocewCWbECUVfXB4E7UkW+HwJC7q4Y+WAFe46zOQQ1IP08HkHFYaPbTh7Lq1NGMTVHX9ecDls/mGqQkUB5ajQCkY2YLCEA8UByvQVfd2lXD3pxDIADZxHMdvcftCEg5XTXv/To8C2QJCmgEuQLZjmICPEi+HtKtDV2r23M94H93DPw8E/3dNNSDOzmE/ZoD0eFA7c7eDrCdH6s9nnR0bQXHIMQL4tJxphWVIlCbGjT1DNkBioUo0nXgjtWJbYANvA6yTDkoEZwBrc3VXO/ZD9v6fXB/c1fPpBPY/42+rpc6BE11CE7nE90zz98RVKxIFR2uu3KQioLFGj/XNYWSwxfqtVOa3RqoJQWIXQJ1/AyE18KZGs3Gph4AFXhtPoGHu9CGvNYDful+SsYOm3rodyGrRP096RT0gIBzJltBW3mukaYH6vHRgR46HojGLgOQFTmG9QwkiVCZshuAAHwIgpHaRN9vDvpMLeA88qBeugRBVwcc6rM1cMwm8GxAFOetdV5uAGg52tdul81cHflJHzJSUFoKoaQQJDoHz3ZAXmZXtefqTS0BXO5DNuXoioxVtvQd+V1da2ZmDmQJmmtgHneAlTrSv63OIBvb0XWUdjR4GYAzlCVqT2Kg/jha6p671GzLWL8DGa6OzuudyulmZlXAvIzg0E/BtpUAlq4vdM1ksH6rk9+UsRs7UA7q62G+R8iSK/psXSg9+m2QTOroPDe6xKuk5+cJBC9h0ZaxXl2/7zKQOLaWQGHQUBvtTHUPV1Ndf1FF5+BgoO8oGWoWeA7Z5wNg8G6WOvfeUtfGtRI06ia3/24CPFB03QNpv+grN7ultbIJzRXZWjdk67p6sIuLbRmL1jBW6o/kIGqZTyBL1VNvvQlGsw3pVgfApqR/BkLx9jSUAAOarCP9vmfmyvdTGWopwy01I9AJ1ctZ7+qmSur6jjZ1f9vDJ/q7ZgBAB9r+uQuilvDeBrne37Sic99NoS0cyDwXsM0nx5r9iM/oPbcaIOkAgpjZNpQyT0CZHbAPnqPO0CrRuQog2RZDdic6VmO9BAqNylmdg21XyyXTHeZBqk51bZWBzvX9oM5eb6jTdUWXry1N12Uz1jXoD4C5eVvfXXWk7+TSgzpfMwD7flmsQZOb6ma/AgSBrQfAdpQgYH0CIrkAXncAePwIsKA/C8DySwONaC719ZD+FHAZOSBZs5lCoNfV9dYDgcYjIDXMNnVd9SB9EoDCQDIGhwt4rgqiuNhv628Adqzc1TkNAB/oO9DdG+he8qpqaHeg+7AELO9sob/bAoLknTtwBkv/DxGkfef10z/90/bP//k/t8uXL9uv/uqv2sWLF+3d73633XffffaN3/iNf5Cv/Ly9/GnbguLmS5q39TPNfRBCvA8ckJoePEWqm8oHfok5MUbXNRJJAhBbPIBuqDqkiIkkMdOodwBcN7H/cRlLV2AMC+AjcS/JWPQolJJKiLKgdbUH8ghdyJKWS1CPjzU7cQqyHcsregACHtvGAB4GUnTze/p9JDXSgO6jyQqcYui8mcBBtK7pOuiCRlUVcMyjTV1rAXTAhQC4N1Cdb2b6HGGph1gGUfRFEPvM51CigHK4D4BxM7McEksRrMERkIaGgA/yXd0PG7BqYgCV9V4B7OvkuF/QKHwHyo/XYz2MFtBR6y2+SMbqdd0jS8jgWU8DpJOZOhZrKMWP1vpsow19T9WFHubpQO1iVNPfcACoXgch3gyCyfVYnYMAmOpjcCbd6w/ImN/V7+sBBccaNBu9OpAzAqWEuwOiwCDpRDDHTdD9C7dBVihVMs9+oevAAZJOeG1WiXX+diFYG+/cXonJgQKDrhfsIL3nPe+xf/AP/oF9z/d8j/3QD/3QZ8HY7Xbb3v3ud/+pc5DS831zb2ER9kd6ah1e1ENmtqebKpjqBqqHajS34DzZWelh/nSsJ20VuuLipUaVyyPAwTTg0Er02VqVkYyNh3rwxJk+SASb3gccTBNI9VwAFDd9NQSzEMotpnNQQjS2qivgdjmHrsI6ZFnaei/pGgwpiDw2QTFhAXIG3hAYwUM9JBIAZO+CoxKR6m4TMlzxSMY2oGuv0tVosQXZxU3IKvVXYHChfFC0wXOZqWPQOR7JmOvq+6AsmpnZwNd3XHHV2UiAeyiEdvaXtnVvdgIFS+em6yiHYrcHa7prpCsHBLVt/dwInMz9RLGAbkU5u2pQhj4FBfjJOZ0DZw8Au1B2613WbNYxHJYBEPcuW2qfCL806Y5kbOGBXQTHpwDy2HgCXHUlaGO2dJ5XgJFcwdLf3APx27pmciau/vFFUECINyBbnKtDkwPe7QScoR7gFwNw4GytDlwFbP4McKzVOzK7zuzuXJ8X7CD92I/9mP3v//v/bn/pL/0l+yf/5J98dvzVr361veUtb3mhX/d5fxV7gRW1W17IUEFjbktfdqOixqEExrYctLVmDVCobuvhsT3Vg5soAq5C5sBAxwkqhVYDLpjSdOOGIMBKNRN/C4QkgVBulaqRK3pqgKZ9tcxdIGfMN4meHjASU3U22tDPXkDkD13NyPRdA7LRKmiYbUA3UwrsyS8DRuDuGQC+j9VALgaqHxcA3sWF8sbEU6xCBnIBcUXnYA2kmm4AWB5wYpswf82WvqMTV/+2oj9hGWiumZmFe7pH/LOgsRjqb7tQ0ikh6593dG92QO8iN828+sbYqTsvSFiaC3xaS+gwXUOmKQHG6Nr9uvi34JfTAYjLvgRqqzN9J4eXAPgO/mAB/R1deO8rEJxtggMc9qGjFvCfJchGrcF+Lvs6pxVwbP1ddVi3R20Zq7VGMpaWgOEs1KkDFSBzPqk41tzR97aoa3Bf671Kv3BHM44JlNgrkLl2gPTTqeizNe/QmfOB5JWuF+wgXb582b74i79YxqMosvn87up6X0hXvVjarZ248w6QAcL+9mHqtzf18D3zRbpzSRB3cKAr2QWn5PpYo97INPKq19SrP/+gjrVDja73BxphjOb6HO0alGocaMfe1bm6tKHPVia6wXugMv9spnOwsafRSQHlm8KHlmPozOgO9IDpBfq8GWDCAsAWZTlojkEL+CY4Gw4A0AeX1YicASO8No00E7AtK4O8e0UP7QbMaQfKm0NHwdJNYCxPt/R9dD4JApaBrt1NT9f9BrTa+3Mmijw4q4fls1U9LKtDzZo96uozb0G2+GSoUclV6Oq82AB9tkCDtXGg+2vitmUsBa4bfSNmAYi4TGeAr4T4aLKGDq6K1mr9XN97ew40JKD2HoP4bbWnQo4uZLPdDEhcoYRt54HOAlikJwv1zA5BvMiDjCXg2c2eg0B0rUHJEKQ31iRDBRWH+Q1QfADAeAuakyKQtcmOgF3/OZAu6QA+EDCmIyB2jRu6/2P/9nvxwz/kNv/PXPfdd5999KMfFRLIX/iFX7BHH330hX7d5/3luB1zbolEzwRw4CUQZfnQZrjS1/HcVSiJgfxApf1JGVv7GrHsrECDa0O9+vs042zBKRwUVTWkW1ACeABAd/Mb+n1wRliyCdYVaANuDHRT9Qd6f2FbjdIYOgi3K3qATqEc0ZwCAaSvJZ1FRZ2I05VuZiBPtoSMXF+L8iStMmlekbH7mmqEbwAz97yujtQmyIVswosbpxCBtyErV4eIGfSZXg4K3Nem+rxxT393E4zmCg6JAk6iIxD7NDPLgCn8fiCuDLoK/D6CjqthrtiTGKQUqnCoLsFRaUMWcwWYxg0ot9ZrOoc5gIJjcCibgAEL4f58EGoNTPnSpgU4Q+f0Ob7sRJ+3uAFSI00FuZ94GiA1IHEImGo7Wej72D+F1v+K7tcUVAwc0AbpjfVmbkCm7hpQf7gVXb9zwCB2J3o2BKZrN2zqejkdw99e1YCwBwDqJUirLBeQgfch++yrHcugzX9cv32dLmZ/RBmkv//3/769+c1vttVqZWVZ2kc+8hH7P/6P/8OefPJJ+/Ef//EX+nWf99dufmrVW0Q0q7CBKlB+yFKQ2TjSz7VcOBiBbG+SKRV9EKi3HgMRWwJlntEQFNt3oEOqos+Rj9UQuIDibZ9qJuc5kAsg1ZzBDAwQ4I2A786yQA/9VkcN5GiuGb3lUo1SDzA5B4EatATwX40ldImAdloNGIvTps6fbxodn12qIzA7hbLbJT10Hh6DcQUHOKwDjmgJ73KpzzHw9HkdwC+NJ3p/60SNZg8ciOOV5j9iyPYm0GnUrigQ2cys4ipxXQldk3NfnyWu6iHTM80wzAEHY5DFnCS6XxcgIbIAwHkD2OaXUNZOoay1hsPXC3RNF/QcuR7mR7OR3p8HrPQhEEACGeUa8l4JkBoG0PF7GQSxu5fB7kCnl9PUtX8KYsvpDaC9AJLEeUP38AbQmtSa+i6PqxqYFUt9lyPIqDY2FVNbH+tvzKCzZGMOzulEv28x0/PCAVjKbh8oKgCQm660BFi/o6PO/aNq8/+2b/s2i+PY3va2t9lisbA3vOENdubMGftf/pf/xV73ute90K/7vL9G4YYlt4CIp8SGfQZafICoajjXQ2a7qid8A7S6lokusjp0TeQNNQRtANguT/Re9ma6mXeAqba1UOMwg01wBK31tQpQE1CbL3AFzdbgSoFgb5FfkbFTRyPXoAIcQOcgsrmm76OEiNnbBkHXQ/3cADIWD2Rq5IKJHorTSAGywY4+xw6UMicgP3KYKSg9yC7JmDNTp3Mcq9PpAudO40ANX78NCuSAE7kEWnanIBGxfQpBiqPliCKFrFKuBtfM7Easc1itKEdUNVEpijVE0hnQd3hQkjRoqqhB2nG2UOegEQLL8FjHVg2g/gAQr1soYeN8qnNN4PeZp47KoqprZv8GZBgrahMyEMROoIMzXKrtmAN+sWH6t4saUFIAs3zFg1JrDplIoDrIqMUdIApLCFQSCA6GIyCeBMxaExjjs2ehKQXIaIu1Ohz74KyFNQBaa8xp9Zo+7zTW50iB93QX4CGTO+AIDmRw6XpBDlKe5/a+973P/vyf//P2zd/8zbZYLGw2m901w/YX4hX6kUW31DcLSF2OQKDvgtpMazpQXxqrcTgGKnXCL9gFALBOAJAdA5gT8ssNAG4uYGPkAEbsAT/P8QQ2ZKpGczbVgyyI2jLWjPX7iOisbKjTecbUIUwKyGIsAJdU1/dRyaAjEcoRdj9kFyDyz8eQEgYwcjtUR3QAJbvcGclYBTpqfKBxsC7IXwC4tg2ZhCVQIqw2IaIH9l/rQzu1B7pa0SUZu9LUjEhrCMSnIAqcQonHzGwHOndWM33HWa7PDMT3Bn9qBWC7jnd0rjdP1AafgFBzttYM13nQrisXWpqeZuo8NrZJOgYCwqnO/5ava6sCeKjrAHwfQoa2XujeXNMRBxn9ItDJn82hqxMwXJbpGvSAq2oR6bvMEpBHWt+d4HkPujUX4By8aqEBUgKZ8PFUs89hoWdIDPxGmxWdl/5Q5z4AHOFeCNkcwnpBUNcOQSYLpL3md9jjBWDY6HpBDpLv+/Yd3/Ed9slPfhrvUq1WrVqlttE/PdfDm77Vb+GAcKCrA/YySjO4MdROobNtB1K6fQC6RqDOXo9BORk0c3wg/kqgVDADMrU5MDwD353VK2o0r4XQvt9QkCtl7J0FGBvQeypzPZ2oM5CM4dYpQFVd/b4FRJ8riIDyhc5pHcDSxRqA9BB5DbvAjQLtURmUVV1oCy8iwO5AdqwWAZUFLIMI2msrAMgM2nqYrCBz0jA1/itvJGN1ALlm4HAV1DH1PCsDHILjGYNYaJABt08MmK2hjtVbegCUh1DW9hQrUm3ouvQOYS1AViQApz+dAaHkvu6bM2QnYH+tprqm/YV+rr6h878GyodGS9fHAWjwlaAIsAb26vNrtW1TwMG06urQuCt9H3XQ6aw31clJWtBsMtI1NI/02c5luh8mEESM6uoM+Q0FnoYwf9X5SH8DSvF1cC8KCJAerUBndV/X/boG+wM6Uf2Kzr0f3b4v/eKPqMT2ZV/2ZfZbv/VbAtL+03pFjcCiW8pWCXRcRF1Ik7f14FmDBEYcair0akc36bmZLhQPnOQxRErLEal+6wLy4QtLYGndHetzZKEar4EHv1GoISgcHat1oTUc8C2tUCPrmatzb22NmGuQvXOhzJNDt0s31XtZ5tBxUYd7GWqkPvWBWPCCGoxdOMtTwAw06uroFTAvSygFlcdAzggt28VKnZw+ZOq2CmgfB/K9R1Y6z9dG+n1TCCCyrtqrTqSGdPIRGbKyRZoTZq0qyNiAxmJR6Oe8gR4oW57u4UWq72kbNNHmcVvGptB9553Vd+yPQJcPzo9grs9RJSB4ABxAl4HVG0pEKxDEjcYa5GRtvcE+SDpVdoCks9D5qy5AuBgEpyFesHJPnzev6nMEkWbW+gC/iHKQBhoCGUOuQdiR6T0frgC4Heqa7gz0fQAFlQWACt28CLAKCF4CXzOODoD1N7pQO4POYL+i5w/UUizKb//bNUgt0fWCHaTv/M7vtO/7vu+zGzdu2Jd8yZdYrXb7C3nFK5Qp8wv5yuLYsupNAzN3NCKoLtUQ50e6wTuOLgCQU7JzACh0ACSbQNltDViRNXTj9Nsa3bm5Lm4kWATelyXIHhS+ZoYeCXUOSugSKWAO+sAcnHfV8XETEOcdqcHIgTI2mbZl7BSYw+tQGnFmOn8jGIt8/Y02kFF6x1p6vFoBx3ush0QBRIWVUucvBSK2Kujl1eqAkQjUOX10DnQAgFUaT1St/WpFD2P/Ph3rLIEnbKlyNcMJgMhBIiKGjjozs0PotCkAn1EFkPzY030YrvR+tg/1MJ9ASdIZ6b3s1jQKX9wY6d+aOgew1W0N2MIQbMwISvHuhq6ZUwL7j+EATXSdd8ARJQb1SgIZpBHgF32oeXqQzQbST/OhRByN9GOhHubrfX1vWyN93oMmpCshWIsW+j4eBFmNY0iE16ALuJ1CxSFUt6FFPA6uzv2cMv8p6JeeVefqNAGm9Ine3yHgl+LK7TjCDIIMupyyLKH48bkv1wXNH8exsizNcZzPMmt/oV+TycRarZb95uX3W6N504tZT0fy2aICHQgZvCAgdlxe1EWbjtVhCODwGDR0ge4AgPIQpAuWOURePowFwEcEEh0lAJ4N9KjOzgHnNNPnTaHMUwA+yMZAQgat8CtXS0RLKG+AHJ2t9JattlKH9XSlG7fu6+9GQLDm9NVTTqGLZVo+pfeSajfeHIzmDMpQ9REIKPeAuwU4rXYd0IUCPboNyBouH4UM4Rx04UoVWk4LbZdfLUC4uQVBAGg7ZdTbbWbz0UgH68CqDg6004Cxa7qX4khfSg9I+cZtfU+zXXi+U53rFIKNFZScOlCK80ea4R4Een+VOsgAmWq7Ef1JCWzuNV/vzweA/SQATCM4cLWp2uMk1OdwgN/IK0Y6BnQvs7nu68ZSg4gSAMTXN6BjDaBeR6AVVwPweg809IqJBhGrQPfSmQ19DjeD/eWBQwiUN/M1yLfA57wtLQtG19W5ugawlDK+fV8v5yv73m98p43HY2sC5clnrj8QUeS96+bVclbWuEUWYQKQrG1oo623QHjvnG6MpY1k7EECc8f6wxetLWNZR7+PcByZ6f3VIXm5ApbhzFUQ9KAKwGNI1S5q+rtHAHLvHoIMSKKZuuCsouFnFWiPByfHBWZZFyQOWiN9NqJMhWYXa7gayk3BWI+gxu/76iQ6DXWGZsu2jI1XwGkDpaRmT+fKBdLKIlZTsgZ9lBTwH5dB2HfrUI3mSReyi0tdB84JgLPa6pxmqR7Q1tBTZwLt42ZmQQskRIDTKS/UYHtDAOx6CuwtoavzqKr3swsl8doz+swelOJmzZGMxbCmvRvQNVVXMkrvnO7rOjzHFSjBOuCgbre0w7Q60XUJFSKbhmp70wnoLuawlwCgnIIuXA6akl7ZlrHIUwMwWen9eZDJ3T7WdbkAUeudBETBZ2qN8o6OFU111jY8dURHB4BFC4H5OoLgHniLYthfnqNn0mihvxu4On81sOXrO8mQV8Qfr9cLdpCuXr1qjz/+uPl3APHyPLcPfehDf+qwSRudl1izedMQbYLshBl1+elbrEM6uA7dWlSSMIjCKd0RmALxArgXMyAQAmKyHIQuh1UocYAT1pvpxuiO4KA9B51BW3qA1kaKVSARs941MEoArs93ADvi6lgOJcosVicHqqXmJ9BCvq176KyjxuYIup7AttoIEmtVANyGwLK+LPSuAb5gc2BUL6HkGUI5ophqFPhsDppNfT0kMmCQtoegbAGt7PMWkJcCr9J2yRGmm8MhU9UAchrp/ndAD7BXU7JXG7Zl6AwwlB8v9FkqgR7mJ9BhttrXe2nVgX/tQWgYgXNmMtL3WULXZPe8ri3Q+rW0UKHrwYbeswdYoO6J2pgBYL0yV526+RK+D5wmp6UlsSNX56oOvzsBOoDpTB3lhyBbFMDcex0og6Zqt6ExWM50M7M5lPYdIICFCrvNm3pOxVT96EHGEcrX0QGoCZRKQzBq6vkzHdxuJ5ZA9UDXC3aQvuZrvsYODg6ktX88HtvXfM3X/Kkpsd28vN/77zMXtEUj1SFMfQROjoEzRBpLyPZLTg7pSsEJCqR1tz/np681lLq64AyV4ISlVTX01C9UX0Kq24UUO7QqVSDzsobOy3oNSDDX6jQtoYuliIEzZqgGMoOSZ5C9RL/P03veuI8UwzU7Vk51Xhq7kAlraFSeOlqOTFNdB56vWaqL4IyXjjovUV/XfRnr+22cARX2EzgRZiBdAiSiGRjwtq/390ooI8+g+9DMbFmFLy3USZ8BZ5ddAGf+ROch3oGSKQj0elCSWCZqi+4D0H0cjGRsXlPHogIMzw68khYwnk9ruh86B3p/pFc4B9xPCZ2ytTYoz0/0UJ2mus6LTN/xWRBvHgIXVATST1uFzjNATK1YQgPK+oqMlXCsLKCkmAJEoUzbMpZAyTM6BVvp6v1FwGkVTyF9l0KwS7ReFcjAR8ChNFPnb3CqUkgraP5p3yFQHEInM10v2EH6DNbozqvf7wtg+0/H5f7ef5+5yJje7bxAaQDwC4ZZKnKu6F4IT0FuCdE3QJsqGKoTU0OVQjp4CTiCc3Xd9CvokFrCWABs3eRMAjGvJYXWt6tj6LxJ1KkzAIwaZNamAdAQZHozIZQyJyDyenGu62UBSuCrTOdgNdFDogIA77AE7ptc19Wir797AFiFVwG54h7oIjnHOleLqjqEgKk219Xf9WEfnYHnsDp03hTqrJmZVaBV3wc+i00jokkIaJowrxOVbxpPdR/WHtBSV+0ZfceTOWReoCzYBOLUXWghtzas/S5AAFI44SHYmO8CKz1gEEcrzVKtgVrjKTBt+SF0EAKGbr4AmoRMA8wZdBq279Mmgxz4eZwYOu9W+n4nR9rQMmirU9KMoQvQ1/tbbgMWrQOyNmC31zCnAIuz9aa6FyeQucpB3BFgRLZoQABcf6mMtWp6g+UdIPzy7pRG7t5B+qZv+iYz+zQg+01vepNFt2Q71uu1/fZv/7Y9/vjjd/t1X0DX83a740AlRsrQkJMD7JGmTLVmhAN7JYzR1b7Lz1FGSg0uoW02wTHb9HXzHYFTFxh0hMCv+vC5NcwpSRKuAQjqwTsad9RABr4etH04FLszaumHrie4w9mJ/kYVOv6SLmTWAGvTgnd5LlJj/TzoYC0hRTCDTsO8AxE4YAv2wKMhTlt/Ww9eH/pJTmH+zoHcx7kxYDOqsDpA2seJKZtq5tNegpKkjdXJ7OcagPRSfeYQ7ifeVGcoPdFDIQDaCxKcHUFGau3pfPU76vhEsNfz6zoHS6iENqDrrAQuuNpKx+oQ5ex9Uj/3sg0NIiYXdI9UoIwagu/nJfq31zyd5ytDsJ8BlOyhtG8OODkb6lmcAeUABwLCJVQwAqBnSQELeJRq4Nhrq9sQem0ZS2Bjx3DE+Zd1/YXQROJAw0jU0rMwOQbqiTsgD0twfum6awep1fq00SnL0hqNhsXxzUURhqF9xVd8hX37t3/73X7dF9B1zrDf8rYL3GbM5HAqXy/1mtnhgjQ+ZF6MiB3xc3TPlI7RDZ6DVEajDr3E6EzqQRuD47iESGQGQME6ZGPckjioAIcB81yCoxeCjMooAY4skAtpeLotU3AwA0fXXVKHjj8QUO1HkA0ELpgCsEAGQOt6XTM+qxK4kVLtipnDPAcBtBLPNJ1+vq5rvFOHkjaN4X5TJyDDvWWWDdTI9pu6HxqANwphTS8ge3XU1705O4IDCjKRQVWf+VpXD+6dTL0XB4SVh0AuWhvqfiVwbgas1AsHGLJhuXkg9ttfPixj/jbQH5xAuR941SaRvvdhQ/dIlTKRuySOrIHy3NE9MgO9wi5gmkbQMbmE7soQuqOnQKp51lM74XjqlJy4mvk7BmzbOlEsUDoFOMJaMbCjptqdaKTPtpHrPlxAh+MMOLLuPLtWd4kEumsH6b3vfa+ZmV26dMne8pa3/Cktp9G1sNtLbFSuovIXYZUIH0QkdUiFBWNqWAz5H+heaGnQvcA9L4DJFOgA/BM1GIh92gSnKdE5iMHxiaF9PwexWnL0poD/ysHJqUXAzAvZhQbMlQts3ctEM3UlEGPWAW/gAWGoQdQLagu2AnbZSQZtyHM92Bor1YBbgMhSFcLySgCdSyDYGa216yyD/OIg1fvrQmlpBgzXS1/f0Sa1xZjZM6E66dVr0KLtgPOd6yE4f07zO9dTzRZFru7DKARmfhkxWwK55QE4ZrO+Bi8XIABJe/q3y3KkY+cABzdXBxp8Ejs9gM7Mle7NoqLrN2qr3clbbRlrQfdcraZz1djWd5mChFDyCSh5tnQ/bECXXTXR93seqrynp5AZApqx+xtqjw9ddZB+t6/wgWCutqgN3bNFT0uK/hGsyUy1Cg0wUutC9+sRYL0qUMGIR8BctHX7+3Xyu3N9XjAPktmnO9Y+8IEP2HPPPWdveMMbrNFo2P7+vjWbTavXKQPwhXd9hgdpPP7/WrN56+RrKYQzL+Tk0OeoxEb4IJp3WgSE06EMGJlXKliBcjoAZ7fA8RnuQVbE14OjDjxSBhmVKeAI6MmKE7U2C9DQo8xQA3xJwifOgewxGgEmBEpYa+ClKS5qbroG0ecEgLSzQg2LD9145dP63lzInkyhKXMAbeG0Sv2O/sYlALk3AyhvtqAcOVCHJgJwfT7RwGWcgbp6qvNXW9AeNIPkn008KCHm+t6vTcE5CNQB6fWg5DfXeVh5ukdCIK2NKEAC7pyoqfeSgEbg88Dtsz//qIwNV4/JWB1Kdi5ocDWhCNsEZffynHpXwYYu1hC0yYazkYyda+nnoAnLikwdn/C8vt/6c5DNeomul+yG2sojwD6NloBZgzMkhCAiBfkRq+oaOgKc6PZQ57QCElZZcknGChBVf/S8fl92CDQYdciSRjrPEyjP+dnta205X9l3fN0fAQ/S1atX7eu+7uvs2rVrliSJ/bf/7X9rjUbDfviHf9iSJLF//s//+Qv9ys/rK537lno3F2DoauRgsaYVGQtEIG1yfKg0RWy/UPgnUTTMKtHvqkE7matRise6wY+vtGWsOKPOUJRrV1e2rffnUHPfXB2BBIDl64beX900RWy5psmfBZ6Rs6CkvjOCyHqtGYJ5EwCyYy0VZB/SsXGmB9tiSzd7UtF7XgAni9cDckAoC6R1/dvqqi1jzaYarw7IbkyqaiD3rgCYeH8kYzOwbYtTnavhCUhsTABwOwWx6RUTRW5HQCHhQ0br7EjGkpU6L3NwzkooIVpbHbt2AqLRwGGzhmxdPNXf+J0DOPCqsBYaaouK4jEZ60LJeRbp81aqAJaugfakD9k/0OALMnVOm6QpB1x11z4FDM+gBzar6zynT+k8t8DR7n9Mhswt1TkoQZh60YF1CV3kNVinFSCj7S/UqDaAvG19Vu3OsAsadSc6961EO6v3Sj27JgE4u6l+LogUI1XbUzs7qd/+fcvFH5EW23d/93fbq1/9avvYxz5mvd7NTf6X//JffsEYpH/4D/+hvfOd77xt7JFHHrGnntLW1s9c7373u+0973mPXbt2zTY2Nuyv/tW/ak8++aRVfu8QePLJJ+1nf/Zn7amnnrI4ju3xxx+3H/7hH7ZHHnlEvqssS/v6r/96+4Vf+AX7uZ/7OftLf+kvvaD7NzObpHMr05ubfzME8jm8oLyUk0MDmRyfSlNUYqP8yd1FHVVIXS7gNypA7DgAKKj/MpAfARqCGchEhPtqbM4B4Dm/rot+AfwrvVLnZRo9ofeSqdMUnKohXfr6bLMGKGg7Gn6uJ2qA+rGmqRzI2mTQPe6neugHpWbMYijC16EFfNZRTpaXgOL9ZXCeQ7jBPcB/BAM1zDMH1ksNNLkK/Y3LY2ipgWbQBNrvF6k6Lu7noC7Zh+/0gWgy3H+ZjHUAn9U7r+s8D6DbCFrSXSixXU7VsZgAUNivQVkbOtuKUOe/l+jaSs5pSScZ69rqwLz2F9q1twi0LONe0KDJBTuWT3WvH3mAZdlRu9PtqSMwnuiROVnqmBvqwb2Z6vudBtA9S5wU4BBOB/q8D0DjAXAIWwBQAR/WmsF+9Qsgy0zVli8rasdOj9QuVjIIhlb6fnMgj1ylmnxYVPV9RHdk6nPiXIDrBTtI//k//2f70Ic+ZOEdWIJLly7Z3h6nov9r18te9jL7D//hP9y8ISCr+sz1vve9z9761rfaT/zET9jjjz9uTz/9tL3pTW8yx3HsXe96l5mZ/af/9J/szW9+s33pl36p5XluP/ADP2Bf+7Vfa5/4xCcEN/Xud78bKQteyDVdnJrd0sVQmWvU0SB8KAHJVpD2rEPNlniQQDMHhW8IboR9RDpWJQME2SffBeBcpO81Aa2eGajWt9f6uf5C5+CkBorSQ72/ARR/lh2Nduaga+RU9DmWZ4DIcgTlwxZgpJaQdt8ATNNC52VdQvdHAl4TtNEnNXUi3FijNgewCjc21IBvBbpexpCtIFLSMG/L2P1g1BdrIPiD8sEjj+k7Ohjp2pjMQYz4ARAZxXK4WTTTNeM0NTioekBICfiMPmC7kqre41VwXqYr6HwESoUIyGiPY4UF7MD5cdB6SMaapvIUrZmWgxuw3o57sH4X6uCXfWhuAMJbBxpLfChvNiJ9x8NM39Epid8C6W9rU7/PXeuzJa7OQQBZvtTVvR421Dk9A1klW+g6by50TYLaknUhgCvn+ryrTR2bA7Gjv6mOaAxM7v3rIxlrDHUB7odqK7u+2p06ZCZXxe17JoUziq4X7CAVRYFkkDdu3LBG4/fr5oIb8H3b2aFuK70+9KEP2Vd+5VfaG97wBjP7tFP2+te/3j784Q9/9jO/8Au/cNvf/ORP/qRtbW3Zb/zGb9hXfdVXfXb8ox/9qP3P//P/bL/+679uu7vUXn9310b0oDVv6QgKSfk7vEJ/qUPEyzInVUH4HFC9G2j6GFDqg8NtXMYDRXkwkGd93X2XAcs9A72yXlM3Qb+m6y0DQsR2CGWoDdBYAqmM+kLfm0+tDm3QWCp0AusdnWcPyn3LKpV+oMMk1TnNwRmqABi+EoBhAUbgwRpI6xZ6GA/39CB/voQ5hfdmTTWaQaBrLe9pZiKaAbsuwOI8X+3J+V39YA5OXTVXx3tSEo+RWbWiB3fqK1jVoNMGZAiRUyzwdS3soBK5rssc1sw80y7C7dXT+nVdXR/3LTXTdwJcRk4FpC1ideC6EPhMoHzYWAEB5FirDAk0Dq2H6iBFqR7wRUP3YTMGTBjQi0yvKbg5B2ppt6LrPIYCQQi0AS7R4QGD/xRAkstI19VWBHQgnlLFBE1w7gv925OxZg2rvwnYJ9BiHIKO4CmUD4NIJ6EAh78KmfDwjmAtcO/O9XnBDtLXfu3X2rvf/W77F//iX5jZp3mRZrOZveMd77Cv//qvf6FfZ88884ydOXPGKpWKveY1r7Enn3zSLlxQPhYzs8cff9x+5md+xj7ykY/Yl33Zl9nzzz9v/+7f/Tt74xvf+Dm/fzz+9Avudm8egIvFwt7whjfY//q//q937ZwlSWJJcvPlTiaf3ijz8sTc8qZBaPXBePXUycnHOvU+EMUZ6UotgCG7BicFdDlZm8p4VI8FpwSMcEB6asABEo50424GV2Rs8Kw6qzEQojWquvlOXTVAbTiItiHKOlpCd9Ua0sGFGsP1GLJAgJtIoTvNBSK2BUTClUAdwi7QKfjg+HhTNerPgmFuAUXACPLzkESzTcA52VqftxuB3tsmiFUCn1ADhEfHYOg9aCbwXdARg7XrBCr3AV3cZmZWLKGdHcS8LQJ8YB3ap8GBTsAZOg+8Vq4PemWwh49hnXsrzQxlEKxRoPKSldrPKRzw4y19juVA/9ZfjGRs4rb1b6E5JBzrHln1wGGAbtLOGDKvKYiHQ4kynpFdBLbpROfUz9Te5R3gaWvopqsF+i5TEIhu5LpOA2AnJ8b4NNPfnQE4vFkD5+USEMCO1badaelvXAIs32QAmDqQFYlMg6vqHSoGgIrA6wV3sd24ccP+/J//81aWpT3zzDP26le/2p555hnb2NiwX/mVXxEJkv/a9f73v99ms5k98sgjdnBwYO985zttb2/PPv7xj3/ObNSP/uiP2lve8hYry9LyPLfv+I7vsPe85z342aIo7Bu+4RtsNBrZBz/4wc+O/+2//bdtvV7bj//4j5vZp5283w+DRHgpM7Nf/sgHbuvc60Du0t0eydhmT7FKEfhCDrQ7FhsaPaXQtx2DwfURCA4XAPaW4JRYX7FdV8bEUAz6bIeaSiZw3ktqcOi40A+cQoYGSrZFAO32gH3yoNW8OKP19w3Aa00g23ZKwqqljkGSyo676vz5p2owdiBDcx189nZNHRoPhDNPgV/Hha0ZQTlnswIltpo6SNkhaHc1NWvjh5oNXIL0QwcSpx6wEzugNxbPYa3VmHY3IweLSs7gsA2hXB3M4dCvqeMTguODGWmIe2axZmMoZPKxe5Yu6gDSbxzmeuhnIPWyd6DrN4M5XcIBF0B2fLzQvTkHB38WqS1agnMwuKzz4pra8o1dkIOB9xYdAV6TUvouNEbE6mwsoOPXm0E2ewOCMBccfmgimQNx52gB9jMAkXYo989KfUceBOPeSOeqgO+rQDZw447pm08X9k2PvO737WL7A7f5/5//5/9pv/3bv22z2cxe9apX2Td/8zffRh75B7lGo5FdvHjR3vWud9m3fuu3yr9/4AMfsNe97nX2j/7RP7Iv//Ivt2effda++7u/277927/d3v72t8vn/87f+Tv2/ve/3z74wQ/auXOfrrP/23/7b+37vu/77Ld+67c+69jcjYNEGaTz58/bx49/3hrNm8aX2kDP9HRegBHeCgCchZCuLkGYZ50DJwuAaa0EHSIPAJmQXvZBC2fh6Ga+OgFGa2CMrZXESq1kgM1QD8HaUA3QATinG67+7gy4eLpAVlgFwzIEuYUEQJ81IhqCMuO4C1kq/Zg5QB45h+bD2kq/L+yq8RpBqnt2qnO6GervXuuqQblvqHd9f6yeVLGtpbgFYNHSXA2k46vxp2YCkqH0TTN/BXgQLjofn4u6hEpdIOtgQE4DDlKMFBwgbUMNGXM94HPIZvloowHUDhxgKUTmc+Ba60B5PodgbQhrhjTHxoCHXIFw6UaowV8G2YnRoe7X4b6+96dA02/c1Xl2a/o+HDC9BWDyetABG8Nv1EE0tjbVNfRcDnioc6CXCQ1BM9D3mKRqx3ah8WXS1O5jD3AVZVPXc/VADRmt0nWiWLkR8IRdBwhF9Q7tudVsYT/w33zLH36bv9mncUN//a//9T/In/5Xr3a7bQ8//LA9+yy0XJvZ29/+dnvjG99o3/Zt32ZmZi9/+cttPp/b3/pbf8t+8Ad/0NxbDMLf/bt/137+53/efuVXfuWzzpGZ2S//8i/bc889Z+12+7bv/it/5a/Yn/kzf8Y+8IEP4G9HUXSbvMpnrvPRjjVvEdab7ozkMzlMcwAdXM4aHJ8TAIhuaBRY3QMjvgthVkUzfC5ghjKQTEnr6rwkR2pEzoefkrHLJbAOd/TguDBXhtwI2rHHIC5bn2uUul8F4dc9yGYB4HkNkdza0Q2eTNUALTZ1021sQnbiRJ9jBRIinqPHfrijczoDMs8FsPCWhUZZhKmJ2nrPXwH4gKyjv5uDR7gGwHM1h446EI1d5jovEfC0WK5/S0zka5D7iAtoH3f0wDIzi4HQ77Cu76QGc91wNeuQIrO33mNIzN6QZfVH4GpjDAvfl2hQkgGguAHuKCReLAGnzu9A4wY4nRsQMmRdaL4A/rD5ZR17GjIgDmQdYg8INIHmwy7qe+vk6iSegUxYGqs9XgPreDNSO5aAf35mBRnzOcgUbQHmCsgTy56ujZM5BBtAM1E7px2JOaw1N9egooSOupqvzl9rpmXa6lWgj7mjG3yBPH96/YEcpP39ffvgBz9ox8fHVhS3HzZ/7+/9vT/IV5qZ2Ww2s+eee+5zYooWi8VtTpCZmfd7fC6fSYSVZWnf9V3fZT/3cz9nH/jAB+y++243RG9961s/62B95nr5y19uP/IjP2JPPKGt3r//5dmt0xhDl8MKOm1yoIn3Y0gvk/hgChitB8HyAdbGgESLjPA+gBHbRGRZV5C2M9RldXYAIHIAoKc1jUivAX/QFNLpdWi3jSb6HC2oRozgYDws1Rh2Q3VsF1Du60E3XgRwt3gXHNsTdRiakO0I4eAdmnY9bgKWogoHVnYR2IQB/1FC6oqqoCtfvw/oV8wgk7DchKgSWK4xu+Pr4R5Sadkn7jD92xj2gpnZAajWe+CBNED93EzbuxMoVy3nGgxtkY4UyERMgdvHp7IigNrzQOcrBaQwBXrhUOcr6QBPE+CrPF/3/+KabthnQfZkAiSkJWQTrK3PVoMs1QXgZHocgOAudNT54DiOc80QFv5Ixzxowd9TW37DU0el0oZgjbQxD3Vsmeh+fXC3DZ+DLDAQNhaBViu8RM+uGqQNQ2jDn4fAzdVQ52/3VRB0Lm4/R8vp3YGQXrCD9JM/+ZP2t//237YwDK3X693WJu84zgtykN7ylrfYE088YRcvXrT9/X17xzveYZ7n2etf/3ozM/sbf+Nv2NmzZ+3JJ580M7MnnnjC3vWud9kXf/EXf7bE9va3v92eeOKJzzpKb37zm+1973uf/Zt/82+s0WjY4eGnDUyr1bI4jm1nZweB2RcuXBBn6m6u3NaW34Ix8GGz1A3YwKBcQBcZIAvb8Ek4PIjK+C5lRe5DQkm9FpF2xRR1PTjyljqOS9ANag4VMNpaq7Fpd9URKKH9OYPW8BOgdogB7LsBmbUulDLqc/2NNfirIah5N33QNQISRwLclsCrUoGESqOpz7HogHQJlFB8iOiTGqyNmXpI4MNaAkFAAY0DMbLKw4KGMqjVR/C39H0kl0SUFxxtcu+rZg4SxKjpd1YTdQ6CCEgNAbvjTvSAyjeBpgJkIpYLkOMhHAysrTDUUtwQHJWMeHfg1Y1Wl2TsOphKN9PfLaE1vLLWeznf0L10ydO10AEQtOXQTUpFcV/3TQz2JCbKEYAjTB7UdfUQBE09sOUUEnum3ZYpgK/3oDzsQIOzD2vSgeAlAIcwr6kD7AJ+LhypXQzawM0HQU6vd/v9zaDpha4X7CC9/e1vt3/wD/6Bff/3f79kc17odePGDXv9619v/X7fNjc37bWvfa392q/9mm1uftozvnbt2m2/8ba3vc0cx7G3ve1ttre3Z5ubm/bEE0/YD/3QD332M58BbH/1V3/1bb/13ve+1970pje9qPuly7fS/NucCdI/I1NK0TAZbPJ0SfKDHBrCSNDfUiSt73Z0MpKxeaFRQtLVjQEqBVYHQ5X3gWK+p/dcgnxGdwksyHU99M9NRjI2H2qkPlhe0t8FQ1CDTop8pe93MCDaZ/2+Nbyj4201DlsgL+E0ILvQAcFecBgouTNHIDKIUEL5NQADOQeTU0HUFZWbIHMKIE3eM2Cr+vDEFfjdGskHmRkIMJuj7524mtrFSMYCIA0tO3oaedA9F3haVlhCOWgw1cNos6pzcwgU5VGmmaGyAVIe0DU1gFbzI2BVn/chOwlrIUvV3t0HjQzn2jpG5ZUOkupCdnKuzqQP7Nr0t5vwHEsYawIWbROJgNUpHkAG8xxStsAFkj8XQfFhCc9GkP4RuGYBjLlw/vgxEGPGmtiYQSbWmUMH3B0ZvfldltheMEi71+vZRz7yEXvgAc0c/Gm6bmqx/aI1m7c6NlBHyYGpFsoAHL2Sk0NdNfTC6fvIEGgU87z9roxFUD4cQF29hN/dhZJiPASdrwJAy0B/kIHe1mQEulU9nb+i0M8VrmauujaSsXWgWcbSV+fAgzmYQAmlC+f78poayEkMGlUV/ePTCuDToNTqzXWdRmuQHwA23BVwSznHUM4Fu9yBzrYcDvIYosC9lTqEwRzI/FztgKvAAe3D2g2BQqMF/DVmZv2mjqcHOv9tyKh4oR4KZQo6WkDV0TgHRKwQXA1AjPcEQLfngVgv8PVzCeDbdvDgJpulzuSwgCARyoKn0MzRq+q6bMAcBAh8v1s1AZJgohYAAvEDpACdfrLRVJKlv4U9RxUHvAiMRsHG3Z0XL+aiVEEK80f4pRU8Rwn3dza5fa4mk4ntbu3+4Xex/Y//4/9o3W7X3vrWt76QP/uCu246SP/xDgeJFg9tNDIid6uTRpvl7ugVhnZZBwFQ6EP++2QEmRwAlbjQUl0QSHME2CwQ58yBdr4OwN6mr7+7AgbaNnxuCgDg5YluyKsxJKyhTdU6msJdwhx0T6DWDoLPtZi+DxwGKiXBswVQjvCg7FaSjAocEgnknwLTcmQJLdFNcPhdyBCugMuIIHUTwAytU2BZX+meAdiZeTkdYmZeXfdNNgPyPtjrGcxXBfhv7s/0HtMSMs0rIPQLARBb18NgG7J6W6gVSWVK+hzZJyr0EOUIHVb0u2QrCUxP2Uk69DUDd7dwhLvX0CSJKDgvgN/MIqouUJCtGRUzDf7uVn9zCE5JB+zOH/alyFYzB1wpF/bWCtZG9451NZ1M7OHWH4GDtF6v7S/+xb9oy+XSXv7yl1twhwL1ZyQ/vtCvmw7SL1vzNowHbCCyDQh/gCwQrPcU2ujnPY3QFsDw3Aaw5EGuN1PW9BCsz8AZqoOKOCzkEXn/uRogHwjCfHAs2jCBp4lO9GwC7LqxGpZpXTsnz4Chdwt1VCbQTkLA/CwfyZjTAIoFMJrNGcwz2OA0hQ44HzpMYKkdJhqNNSHz4kDXSerrc5So56froAoOVwTGepypIasEmiHIMDrW342gZHdAWK/Pgcdr0yAICEfQ+ThL9L43ALeSAZfM2VzXoBvqoR9DFD5DwLmut110DqgUT4cLOQdXYIwyc+Rc0RjdH5GB6vNGWIJtwxhd5KyRw3W3GSS6F/ocOWb0OcLRUjCumaY5ZNtOgbaihfaYLro/6DSETxUwLx6cK0t4tsi0JP7cHdm22WRir21d+sNv83/yySftF3/xFz8r/nonSPtP25Vb3fJbDIJ/pC9xWOrGPZ1Cyt+FyLcKtPNglMpUF7LTBk0qWHjTBYCCU3W4lg3dfJlBpwIcMgSJm/taConAaJaw/dbwHEuIshqbGhmWiRq0Dhj1bKoHwgi4fTIoHzogx9MDRvAUDp1JrsZ1VRnJ2ABwImGlLWNNF7JyMmK2Fen+9eB9LH0qq+razcAYusDrMwWHZgZrqBsoMLcAE1bCu3Qgk0CHZwhli2009GYhrMsZcN00IbNRB8sbgKO4CQftCDi7PLhHKOxjftuHOTwGVAlZ9zE4/XMAI3eWuuJCIDocQxKoAgze0HBpxA9FjjY7L/Q5kGCBJhy+qPxFu44O57uFWhA2jp6Nqgu6OmokMg7QjQiejYI6Eg82gC2UsHYdWJMhMc2j3dEGnq07nNgY349eLziD1Ol07Ed+5Ef+SADPn0/XZzJIv/6bP2P1+s3FMSna8tlVAhIJoAPVOA8ZFYiUHFA6z0B3qQ20+KmjIHIXlJjdCpQ94KCtZiBgCwJZORAntuDQIuK5BRgRFzbLBOZqDYR3FbskYysQjW2CanXhaiZnGamxqZ6CblisR8wYsjtLIPhshyAhUOppMphrdqxbB+mNQJ2cUQHttsDgW+aQhfR1jdNxEIDjQ/meOhDZhb4C7nOIP3OI6Ctg6HO4Q9LaanyOksIhHh6U0dL7mUCGsQeHWwxuySjRtPIzsJfo+bxIHd4RCLVSwJAC9DGGwy1fghQKkSRmes+bMZR+AKM2iygTpnainmuAtF6flzHPAcmKTNd5nIIT2wFy0Tk4XEAoyY4UeX90TNPO0XfZx7KbZk+HS3XCRtBRF8GabEJGag1riKhiKgvN3g+qRACrDmEF9n8C85cmt+/V6WRqX7T14B9+BimKIvvKr/zKF/pnX7DXR3erVr0Fg9SrQgs5vLAEDMsYooQ61IrXJFMAUWWeg05VoJicdU036SbkfPaBtLICrcThQg3LDkQx40A3/RQ+lyAAXX+3A5+7Bq3rS5jTyhqAzBDmezMAeAKD9wFpDEPraiNWg5YhsFTXRn+pTmLUUKfuGNZfbQg1XiDurAJh6Az4g+5H8Ko6XBPAjnXWUMqMqPQIgGU4OChzRRXtFZi/FHAdhNYw446cMbRPj+Fz4QkwUG/q3AxND/Nn4Zx1IGoewb0YsEMndQ2kUM4EEmlZovuGuuPnaw1UEoAAnAz1sFqA01of63tPgPIhouzkjko1bVO9GvQUO5BPILvdqan9TBGODA4DPS9xZvj6bMfUOZa3ZawPmZwhKALMZrqHAX1hU6CoKEoNLMbgYC6Bgy6bQ1AM1QDK+6VgP73odgd4Fv0R8SB993d/t/3Yj/2Y/eiP/ugL/dMvzKtamFVvGpMMujU8wPhUACjcgEN/mQBmI9eNsYZVuwr0ICvAoA1PdBkMdzWi6mxS1KvP26pSfVsNZA6b9Bx0ASZg5JRg3mwO0VgdHK4KRFTjuno0R9TlVFcnLIX3G0K5NAaMWfDMJRmrb2gaP21oOTIAMkofosBuQw3QqqPZCj/XeSlATd4Dh6ueQXdaoGty7etvrGEdtCDL52PEDPp2cBD1wZQSxNVDh5rEnM1COIwoC9RIoO3d1/V2ivgMitb18MjhAG2CdR/Xoaw4A9FTEm8NIQu31qg+BpK/ELJZfaArIIehVUDHqquAZxcEXe9kUDYzK6AN/LiE/ZBdkbEplLpbie7XITixKdinsAChZlc/91imNnW6VifM8zTb9ilQDnAb+hvVVD+3bura7xf6t7MZPLCvmfUO0K5UXXXG3QO9l+JUz5pJU9dpH/jmqndw7s1nd1die8EO0kc+8hH75V/+Zfv5n/95e9nLXiYg7Z/92Z99oV/5eX3VsolVbxH0rEPpzPf1IHMgxT4HIx55IxkLoraMgZyVDaFsXQZQvtkFADCcCRlkfAoAllJ2bAXRbAadIycQ61MH3H3U+QSR13Pg5NAhZqkamyAkwPhIxoYT/Y0c/raz0s28AJB2AJi1fKUp8fQQDruWGtxVpt+XQTBbOnrYTRx1skNguT68rkYpbKhzH0CW5NTX363BgToA3FkLxkooidUgI1pClmRiH5Wxz2UmA8AgZYDtCCJ1pDYiPbifBnzLJchObJKz4eu9jKG055ETDMzNYQve3TFQV9RGMjYDxfZ1CuKt0GC2Ttsy1k+BygFIYYdQDgqAXd8HhvfwSPumvG3IRA414IoK3Q9FTd/bPAVgeVc/twW4vwOIDTJY+0epUo6sq/p+vbWuoaMZ4H5KkCQB/rXdHWjS8F8mY3aogfIhMPMPQJ3gTKn2szuA8jXQn6Tz2/d6RorFcL1gB6ndbts3fdM3vdA/+4K96sdjq93CQHra0oOx4qgxXEGk71c1Wk9DdQ7GYCCJwLAKEfei1N8NchA4hQ1ZB2M9WKhTl6R6L2eqmoKdh3owHsOSfAkcZCdQa6eYYAecphFkHdYhdfyQsrhuyHpXJ2t+ovd3vaKHRODoetmAAzUDzpigAd1RhDsZwMG21tbkoKXr4Nxa8yynkMKu7KqzG7sa9RK/kUclKHCKSYMshgyBAxkfKlUTuR3RJDifo0eHRGhHEBz44JCTFEXPPgG/oe84Q21HXf0dcOZHwOA/BUqPaK57btYAEH+i6zIGothlqI7FYqXzVx/pXKWe2o4R8f1AOShzdJ03EnUiogKoTvahBAv7dQiZNct0rkBgwHprXYVhHeSMAA858qFZZwBZuROwO/drsBvDWlsu9V2ejHT/z0K1MTXAZt5oAVFkAJUJoPk4qutvXAz0nd9fV+dqecdemEInOF0vGKR97/r09RmQ9k9/5J9ZtX7zgPSgrGA7QDEPu8Wbq3FYpPq5OYjmbg3UyM12dIPn4EglAFCuxDrWBb6PJlHlw6FVA2etCn87gTEfDtAGOE0h8gLpdXiiUcyNBHAw5+6OdC3CA1mf93AImcSRllqizZGMhSc6p9O2Poe/0idu1dWpW3l6sIWE4QDcyQoU4b0UxCUbYDTBydlZAstyHcDmpE4PL9gpoIQCbeYd6LYaAm6q6DMx3hDkOCq5TliNsnB1KIlDVFumuqZHiysy5mwBI7ujzt4RBAx7IDQaQelneQSAfSithhugsZarI3AMMIO8CoSyUz2icmDNj+CwbCXA9N+EFvK2DJk30jlwCoUA+BAkdtq6Ds7B+/VX+sPTciRjk6Y6wOVQ95cT6fOmnq4/p3e/jNUge+8AmW89Ik0+xcrNNjUjSlqlCwhVxsMr+ruBZtFbjtqJqKZZ0vyOgGs2mdlXtr7yDx+kfe+6/XruaGqVWw6WdaBOzk5KhG1tHRsDKBDI44qKbsgrHmQiQFtnx9XN4kVwAACYNvN1Qy4zNZo1MFSMNoCuOFiSPnEKQaQ/hhLl0rRDYrWpzxsMIHNwpBFQCJIfVUiJ9wuNUjfgXRrw+PhAAbH04G8BS1G09DCZDNRoRlB+HULnYgTOS62v5apVXf+WAJ5bjq6rgxq05fehHAkdTn0oVY2n6gR4bX2/B6AUUt/Rv03HxNRi5o/UyZxCp9IAAOdnb+gcToHzyM2g1HVOf6NM4YUOQDi3R5Qe+qckcZde0u+r9fVePGC0Hyz1YGyDTuIC+MjWjjr4jZIA9vo+jqGr2A51XVKgZ4BTiQAPVYVyn53qOtrPde496NYcljr5tQUEwFPI3gHr+2KiTrY71AxXdEYdpPPbbRl7NNCxHGz5xyDobBVqU+uwj2JH7/llnVfq58C5Skw/d/3g9iaG9ZQoCPS6KwfpVa96lf3H//gfrdMhci69Xvva19r/9X/9X3b27N1yRnz+Xlv1yOL6TSOxAAkMW6gRSRLNJjRjPcgOgXenA2nPiq+gxRKi+gSUxYNSjfCAskrVR2XMCt3026ByTgDqPXByGlC+IRmFArhC2qZAZiYrAxFFEM4Ff9DsBKRLZvremoEayMNZW8YS2IEYzzh6OE2BXycc6NwHFejhGug7d6AjbAKdMoOaZhKp3dt11Lm6HgOYez6SMYBmWH2o9iQoqdynMzg71vVcz0DO4Hk9OOYNIiwwA6iNNRe6aPaPdW5OAIQe2G/L2CLTyHzq6TOvoXM0dNWxW2f6jtdOW8aWK92vyYGumTzQtVCvaFaUjqNorcHaIFKutUqqtm3l6FkUX9YSUbWuc5UAsDfbB3LbqtreNhyZHdjEhadzsF/oPoxBXqaM1Y5lCCrWTTIdQwa5B4HtNf3b2UTt2LMOdNTeB230bT0bWqmW3ecddWhWgWbqj/f0d4/679e/vQEB3DnoDLwDMzIPOei587orB+mjH/2ofexjH7Nu9+4oxj/60Y9akhBh1RfetV8dWqV68yW16rB4gPxsPtLNkkJ6vu1DBbQEnaRCDanb0tbfBWQYHBAp7FaB1dfV76tAZ9YnlzoHWwAOD3xdIxNw/hxQgDcoFfSHkM3q6ObbStQZGm5D1HsZiB3X6nTug3baRkUdi2FDDVBnrpt5ScSYEwC5BppeznIdy2tXdAxKlO1cnYP1GOgegKDyzFoNc3NDD8DZHFrKQfD0BHikRhWd57yp97f3FOQrd/X9dhog33Kka2M8YFsWhNB1BuXvMNB3lw/18PDbulaHXTgYR6DLByXdAkqwSaY2PANvtLaGshY4xlNYb+MxdJOBHM9JW+faA8C4LaCsVYWW9Jdp+/4GNLQ416Bc3QBgOXBGVUC+peHqvhmA3uNsrfs/WOnzHoP8UAzZRQ+c3R7kJPIxdEKf0bVWBymfuX9Jxq6c6jo4Of1/ZKyTqZNY7YGOYwDRUFXfeQVKe5371SnuHev7WNVHt/0/HEd43XWJ7c/+2T9rdwtX+tPEqF1feFa5hUzvcKSb774E2icDNRgDwESke2ogVx1tcu9Ah1kDouYl5M59OBjbdQVQj0Pt9Mh9PRDqUOMfASmcN9XltwHZp8lIx27AAV/vAB4KMiXXiKdlqHOVQBbIgQ4z5zzw4YDxSoF1+FoBBGuhfi4A5zlYaYQ731D23xYQ2VXHerAdA3N4NdHPNevgTIKIWR86ZZYVUCAHbqnths59sAbagASclF3IrM1gbSx0/U2A+NSpcTp+utDMRmWqRjza3pWx+i6Ub4DhPRzob0SJZk+mZ/T5ihWUUXLgjToAKgcQR25AZ1EG7bNRQ7MEvRL4vjqXZOzo9OMyVm3oHqlMdR25DbWf14HsMdmFDlMP3tFCD/MsUXt3BMRPcaJzvwkZ/RE4ept9cIZc/dsYfEkPSo8JdInmZ3S/1lx14HpHug6WpyA/EsG+buqaLPsQdAIk4+IFnYMvqn+1jMWg2pY2VHvuk3dmZwFHSdddOUiXL4PA6e9znTtHNOhfeJcbZLcBsx9sqQtfWUAttqKRHBGnHYNR2oX2+KoDat4ABHUSHcvnesA/XV6Rse2pHpagIGAOcAVlu8AE7Wskco1kVDI9LHcBsDe7AYrh0O3WHWhkmDS0U2YAnW1bqR5iV5dqDBeQ6vaa+rcudEK5bX0fnfKlMhaDI5oCo/pyDNnAQn9jCJxbQUwyADovQ+iO3PSgbWehB1EEkXUOh10aqVEvpupodOdtGSvXAE6GslR7pQdRZUEEf2anLV2rBTCe+yBgWy50DTqlroUAyEpvQDm4flnHmiAdM4EM1w5gpHzAL01SYK9u6Dramj0gY5mv++sE1tFmoplwf6yH4CEIK1cLCPQmIIgN1Br+AsSQUhBgBocrhM7lxVoJVr0FsfpDdgwydWkPZFTm6uzWwSkOOmqPqyAvNXJ0PZ8ANGvdUqd9Bniyzb7ajqyhfsEmOGuEbrg2/L9lLK2DyDhkbGfL24OmOYD86borB+niRX3Z965PX83RtsXZTWNSAKP1dlUX440l8PPCAVBb6ZgLnQ9rT5dUC+gFbkDEXW0CNQG0CF+p6IHnAwapA1mg5gwI27aUNCzN1DiMoctmeqKbtAvMf4TnGSwh2jnSeanHuj0AYmIXy7aMzXuwAQF4HKa6XpaHD8tYegaEX/sjGUsARFo0tYTlACYs39BIrlpCBJ7o921m6uTM2pr+tqU6AckpKNbPgH8FuiiTUyhlLkYy5gLeyIeyRQSiz+sDkrUwKwDbtQBT6QOPT7Cnh9usCzi4lh4oZ+e6ZspjKFOE+ht+BvecAfVHFRTqIau0BWzuh1BK9l11CDuwl/Iu4G/mOqnbgEuMh7pWswTmudD7W9baMtYD8PUY+M1aQNmSzABXsQndc9d0H47AMascq+1Iu+o09QG2EFCHc1XXZLTUTOI8hyAMNNuqFbVPp8BH1AUpqXjz1ToGv9Gc6r1cBgkbP9cGGUvu+NxdIoDudbG9yGscZ5be0u4bQNvhR6ENvNrQTd8Dx2KzqQslAU20rFSHYV5RC7QJNAQboO12oyQOG83GtBdqHKoFgDld/VxaaqnQgc4sb6mG4BTO3hng3tyZPocHyvM7Mfyuo9tjlIDOHIAvkyPAyxBTeqQZlXZdn+NME/iwjnUSvKbeX5Lo3K/G+n0bLV0vM+APWrlaQhmt1bk6ezySMf8UMomAIR+vIHSFklh2SQ+Y7AZkCGJgOwfwtLfWgzzYJdV5swuAODi82tbv3IV28Ybu9damrtUZlGqXUG51W+Dg99QB3MoBfC0jZgt4n13IWIyr0CzRAjmJQ+gcBVZvW2p5xCv0NxYewAcgG2OeZtsrYVvHCn22BcypzdUGfgI4u3ogIdQ90nU0jdVu26naz+b9gDecAu1FH7LFTQ1E5yvAXEEXawLQCHek95yd1fmrQAZzAeoOB5d/R8ZaFWgcAjhHPtO1MYSOv6S8PXu/gE5huu45SC/yqjzoW+UWTv9uTV/sPFADmQMvS3CiGzLsjHSMylDAbb8BYO4qcApdm+sh3fI15bw51d+YlCA/4MFzzCBj1r0kQ0GqjkXNUSdxPdYIKGtByzHkiFeejh0AhiuDLrEeRJCnEC2SWnsaQ5ODp79bq4DeG/CCeEBQmfn6LhsLUNquaLvtBPBp6y5Q/gO/zv2llkGGUOJdtPT+FiD9kkx0XbkeZOA0ILWspWs3W2t5s4Sm3F3w1lIoR5qZOVlbxjbrcFD0waluA04TqCa2Y53/cFOf5U4xTjOzFLqc3IbupeBAnabjCLq1tkBMewWBAGQxZhtqF5O5/m4NWK4njn6fB051G/CBSQbCuYDnyaC7157Ve16vgYbEAYcQyCOPXMjGHOnaWEEWOHlO56ADrPkelB7LSLOBUOiwcKoO9XoGAttgxno5ZJ9ASoZoAkchlLAXahM2z79GxnYuKFt3G4JY9w5Ct9ldEkXec5Be5HU+Nqvdsk6dALocQIcsgqijGlOpQQ3uCJTTS5BSKMAZSvtAfpjrAd+C0tkhRMyJr59LQhLdVWKyKL2iXwgkf7VdLXs0AaTtfAy0yToaKSxABmA60t/dhPIXULzYxiYcHKmCTRfQsbZO1YBfBsxaJQamX5BXL6Fjre3q/bmZ/u26omvIAQfE9XRd7bW0LFgFrp8NaK8F0mFbAi4uA2fBb4PT5Ogab0IFatEFwegGAMH7DNLOgdi1BYcv6fylh7p+c+jqOgckRVNgRh7nmk3MAVvkzfQgK4FBudtRGzMBzplsrQcyLGmLoazt5nr8HEKpsEVZKg/EYMd6LydD0OCb6TueV9Ux60HX5PIUgj/gaXILmD8oZfa6UCpMoGw8HslYH3gmvHOaRSvBgasCl940UEdlA7TT0hnY1AK6t3Ndz8tSHdHmQu3TPADuphvq+BwCPnDnJQAEn97+t/PpHyIG6d71ua9ouLboFlJFkP6xChjx9ZEeUFcDrY1nNXWaoAJjIKNjS2DNzjrqRCRAyhVAWasOmz4rIXNFMiqRUgRMDDrbgMEwgA4zW2mqdhoBXgbad3ugC9UsQYsNcJtlTY1hDArpcagOTT1VRzmFDM3Y17+dQ9p4CNFxDIZ0P9T3VoFo2xtqdmcJztCZta6XwakenlXIIAUDAA5Tuq2jhnQX6BkMDp19eL/TQp2USgTluUP925ME0hVmttnSEvEICGCDK/q3vg9ehPdyGTr6lM7r0gGaCuCr2tgATicHuioCYB7+lH7fJuxrO9U1U4Fnm3b1XrogP+JCxjwF4sQcBGKdtTqdOxvqYNYrkB2DbOAMSBybLchcg2yPAayicqyNJRNoppoEgINbqmPW8fQ3nh1qZni50r3eBFhAEzI+V6AFv7XWJIBfB4cG5Jb6Z4HIcq2/EdegnD5SW+TsayD661C5vbPatwQnj64X7CB9y7d8i33rt36rfdVXfdUL/dMvyMs9Ls1d3EytxA1YKBNIU1Yh9XsE5QyQJOl3QKgVeDdsPZIhHwC2vQxKNQCIXZ6oYa61gfV1Be3ToEPWB17Hegi4JOLJmOjYItUopnJZMQj9UOd0owFdGCDAWD1ty9juQo1SH+RW5jWI7sY6z60ZCLBCR9gSGKP7gLXJM+hiSdV4VULVAquYHoBXCn2XRV8PkwPInKcxdJ0A/mPnCDq/4HOngNHxIdPhZ3rojK/rXpg7etOTkFTbzA6AUbjW147f6hyCjakeFMV9H9SxAFTSt4FrKdQ1mMEa9KDTK5loBL9ugUwR6HEs6iCjMoeszUXg+5oCyWepdrEBQNxyBESnbV0zffBtHehirZQavBiQOK4CKMW5asjiEHh8dnTtT5sQwAGQfrah97KY6VlzArI2Z7rQTToB8lLI1MUzKNM2AQxfheAeRLINOpwj0AKcQGNO8oDe87oPoHnA1J7eQVG08iHYgusFO0jj8dj+3J/7c3bx4kX7m3/zb9q3fMu3/KlgzP5c17V1avH65kuvQmcMdA7bfKobzd/SzRKBMGB9CLsegLhzwAetN0nkVUF3Dmh/zTzQZhiBPAJ4+mvI+KwAzD2GkiI0FtlkR7M20UJ/t1LVcstFSDkD9MkG+2o0G94VGbva07nPQR4lvqqH73wGi+OsroM2ZKlsBozqIzUsx0s1pEMgstyEUtdRAiWZFHAYoXaOLJcjGTsBnENY6g/n59QY7gEgswusyFuQPu9D43AVyEaLXXX0FgB8NTOrlep8ZxAIDF1YXK2RDHnAtG6wR4ItwLIcqrNXQKdXHRo3Siit5lDGW3rQWu/p4ZbBkib71JuDBx1qpu8QuJsqNaAwqemzbU9BX8z0MLcaOOSpAqNBGtMWG9AxchnsYk3nNIZ2e5eIYg/0QAc/1B4Ggs8F0DP40IG5BgHl4oyeA3FT39t4rvfnntW1UQF+KCJizTdBwPYqYMKg6eCEuqjvLIOCDBddL9hB+tf/+l/bycmJ/fRP/7T91E/9lL3jHe+wP/fn/px967d+q33jN36jBaDM+4V8jaqhrao3LcJAs9XWqABILgQAK9TLI6gLXwVZB8hIWmMBpHensKtmapTmkT5IHQB7s4lGY8+D959egMjrk7qZk7ka0mt6e7YN5a8HofaYQcvs/IZu+rINmJBE52oPpAtcSOkGG7oPjqAE6ICEiJuoU9eArq4VZBcywCU1TbMLFWDDftrXdVXUr8jY8ljnJYeSvgc4MVvrQd6A6HN1VV9w1QMW6GfaMnatSxw5+rm8A+3eR1AqhE4eM7NyT/EZCcxhDKK99SqsN8iuZZ6uj5UmWa0VgXyGo+tjUdEMV5G2ZawWgjgqcHYlR8pbVO3pQVacqD1ZQpcYQO1sB8h3i1TBcdc8wO7Bob8Z6jqaAr+RU9Puqo731TLmz9S+u4B99KG1fr5oy1gB2Z3yQfg+FwSKgQKiOVbnJVkD6DsHqIALzSvkyA80AC5AAWEB1BP5GhojrgD2DlyL4rxmP1dzNcj1ye3roJgDWhwup7xbeuzPcf3mb/6mvfe977Uf//Eft3q9bn/9r/91+87v/E576CFt1/xCuiaTibVaLfuxf/VjFt+C0q7u6ssZgq7RErAYxEPTcNWIZOA0OTFwFAHFvIUa7cwhvTwExycCYK+f6POuc2BQhtT+slA81NRXQ9BJNUP5YEWXbRV4geYQzpag/dPO9bCsACGfwRxM4UAIS/XqClfnftwg2Ridv1YARg4EYkvoNHTm+rvlQg1aHficAPpgh6A9N5zrGlpVwYDv6eemsHaBfsX8QtekV6qhW8302YJY158HLTXrCbzzalvHzCxqAp4PSqE1Uy6jrQoAWBfqfKdttQnbLSDWA84zd6mf2wTR43lPD8YFNHhkc32OqK3lpVakB54P5b417KWqAzp/YAMd6ERNoRwUAlB96YLjDoS81TYQgW4CHmo6gu9r61ihwUE+1+fNfJDKAMhaXgC7Nig03AA+oqhQZ3cTdP9Ocj1r2gC5agCOdegBxmzd1nspFUd0ZUJNGvouCR0YRdDmv3H7TSfz1P7pn/1xG4/H1myi+qWZvUiQ9sHBgf3SL/2S/dIv/ZJ5nmdf//Vfb7/zO79jL33pS+1/+p/+J/ve7/3eF/P1nxfXgTe1indzYxaQQXIzCIugA+ZwqIvxkquG4AjkEBIgNXQgs+F5atSXAEJdQZ25Y1pGqRWahi4BQ5OFQGQJnEeP1nSuylTnau6rMTwAo3kx0HmZA2P0HHDg7lqfrQr1fOcIOEq24XmBnNEdgwfS0GcbhVCSHQO/FuA15i2wri5kHCZqWNIcOtsAvf7gtopVhqAfV3bApEG55PpZNZCrgToQl6GTJwZG+hVQCQwK4PCqK5DW3WAm7fBIHc9tWB8rAJdfBxmGDnR1Rb6W8dwI1sJM99IQBF3Bj7L1NXXSw45mrtoDwJTAXC8a0IkK7P/pZdCFBAqDGWCBNlYjGUuq+rwhaE+2OqC3B2vV5vpsIWRewkT312lVnRwHOnQzwInOazq2hizLBvBUDE6hbDxSj2YQAmM82J0iBUmsiq77xNezYT7Sd74agv0EBzgBoeUYdD+9bc1m34BGhJ1h+/a/g/OXrhfsIGVZZv/23/5be+9732v//t//e3vFK15h3/M932NveMMbPuuJ/dzP/Zz99//9f/+nwkE6jZcWxjc3YSPXRdsa6wHQvKBRbgRdXc8f6MueAjg3XqrRnNZ0kbXnIGoJZYFaX3FJAZBn1IFrKd6E9CW01nqwIUuoDVeh7TXx9G8fBebbYyI6BHX2FsASBqBBPgcQggPt4l5dN+4CorvoSCO5CWSQzl/X9XIEjA2UwSBISN0DTEOsGYIQKBEunoLeE5BHFg44ejUoC0bqLFShLTMOATcBwFci/VzF6vhUMsAqTXX+5sDrY2bmQvkrgEwJcHJaY6LvMwVyvLSh+zpe6OE7mqvtONvWtZVDN98k1rXQBWD5URecnFydpg5ITFyF5gG7qM+2nACB5hrAcT11aFag/TcFSYlGrtmOylLf2wjEjN09zYqsofO2PICSXfQpGfMh25YctmUMmkntCsgjBfB9BiDoAmzMeKXZ3doBlL8AclwCaWWvCtUPqlYA/UkvAPoIaMA8hGaOOnSDr9e32+P1HxUGaXd314qisNe//vX2kY98xB577DH5zNd8zddYu91+oV/9eXnVw/y2lF4QaIZmcVYX2aQOwpRT3WjHpUav9QaUWxyIgNaQOoRunkVFD8ZLaz3gt0od88E4eBNdtI4DCxJSXAV0JRlgrs4C+eZ1wGH0wLnqOrr5VgBADyrAH7Kj97L5nBrw05VmLMYhkLjV1aFurvQ3Vl3dqgFgM4ItcEACqOcDfsGtarnETSG7uKnvKAb2uElTnYAmtD+7e2og800wrr4enufA8Q4K3R8ktLpc6uF+qQVMxFsMFxj8jjp2Q+C/qgLdhl+BzqwtoF441Ocbg3huVBnJ2BXQ5fIqOlYHAsMB8JXMSj0Z/RWkqbeAGw0O6dYNWm9qY1o1zVgcHgIeLRjJ2LajGek9KE1HEQQWIFZLEizJGrrdTPeIC05nAWMH4CQGMUEALsnYeAqcdpAJ7wA7+RiCxCmIRkfAQu0CpUcO2nP7c2C0b4KkS6ZzUICeokG5tBjruZc2bl/jKZ1HcL1gB+lHfuRH7K/9tb9mlQoDF83M2u32H0jg9vPxOjN93uJbmE+Xji68o2N9YUkJRq7QTX9+BS2VFXWuRrDwnJXWlAlR/NK6gngDYOsmwGNZqnGdQRdBAzICq1hT0+HZtowtoaPGgBb/AuAmSigfDuqgjbUBVAdrLeksTvTZrkODWaXQ+Wt3ADsGDkMIHFQzT3+kBaWMEEDuaQk4MVJmnwGjOhjILFSDu4axKmSGZiNda+fP6UE08HT+qmPdW7mjB/4UOqac+UjG2mDDvELfedEHkT8zq79GP7v9u5rWuwGkl1NX1/7QdL48aHqpO7oGJ1BKjlZ6306k73MJ4r5pT50hNwX6CXjvp0O9lxbYO3re1hKoKzI9S64B0eFurmPXGiMZi+BwdOsqsDsHTJjzFJAzAkVIErVlbJJrEAbym1atg5AxZNvHfdB7u6F2LK0DJQpknwuQM7KJZtF8cPjn+/q5ear30oSGhUPQJY2giakCZ1KDsvygH7m8o8SWAnEpXS/YQXrjG9/4Qv/kC/oauHOr3KKNtgQcAWVZgjHUqJvQKg0p9tUMCMxiPXicbd3gu3Xl5+hF4EhtXpGhZKAbKAXtqs5MHQEXuCmapR766wl0NIW6gUoAh6crdTDrIAC6gCTVrAUlzyOd5yBR4zqHUku/pYfiuTUYL9BnG7b1czspGLkESiiBavz5wF9jFcAbrDRSz/bA8a7r/a0r8M7bAK51lFH9GKJo0gccxYCRKvVeLgLvk7PS0tJ4W9dasVR+tzEA+M3Mit/VAzmGE69V16xDsa/rtwZAuGGoZa08UuelkgDbNCWQXZ2HJXRcOtCCn6yAYwvWfu6rQxNVIWMOwcsp4Kc9T23q7hoAyhAgDcBxjAG7MzwCvAxQjnQhUzIvdK5SwBvOIWAADWUrhpp97h2rHfNASiaDTNOVTL2h+r7asXYPunspwwWiwLaCRhWQpllDCbqYQMm+DioGLvDwQVap0gb9wqu3fy5dMXXHndc9Ju0XeYXP1yy8BV0/PgcimyV089RhwQND7gREGf2WLtCdU/XgKyBcSviAPAXGbcgMbcRPydgaxEf3m+qEdVe6aNfQZePkeoB2oF0089QoFR09kE9OFDPgA19SCRst2NG5XwSKZcmhc7F1Qw/PJUTRQQFs2FB+TTyd+xUA81uALVqDvhV1EPobsA6oE6VUIzyFNhufIshwJGMRHO7jHPT3ANqGtCIgfzHYhg64vpaWDdTGzwQsVjvP9B2n0UjGqI35vg2guIA5XOWXZKxm+k6gamzXar8rY62prq0pEMUGQ3XcAQJmgQct/SOd12SuwdqGqzbrBvA+bRq0NJYaCMxq6l1lwLtzBED1M21ilNS9NGrovSyGGqhUEgB4d0BypgOH+UDtyWCkjsrOCsrkXc0Cdwa6rsDntMVVfd6UCI170D3b0zWZrUD3E8rkvS5I58xAfmSidsxvQvCyr1nN/uT2dZ8BjIGuew7Si7yGZWBReXPBVP4LCGU6UGtvqnEdQ9mjA1IZDrQIO8B/4wI+Y2rqNPlwwDuazLI+dKyUoNzdDJ6VsWhH72+0gEMrhY4hyAxtphoVpVU1XtOeRmMlrPoIhGnTSH/XBzHiaq6h+mQNERpwgKwbug6cHDIOsW765hokU87o/Q1B/8y/DhpmACJdbwJH1lIzTb1tIO5L9TkOCigPg5BpBeQbdiIog5ZAfLoFfGKZrnFvXx2zNYjIpgM4Uc1svQNZAmBN8Tz9navQcbmcjWQsBIi9P9Pf7UEmwl19iYwd1EAqI4Hy1xY0X6Rq22JwQCrQgZRDRuUqOENNkN5wAUfogjhy9CnNCtTOA3M76Ecu+4AJA6xSAKXuEJx5pBKAJsDqqc5fvNR7mYJW3NVIGyM6OTgR0N23nAErNXHfraH8NdD97wCmrgIvc5Xp+mvN1Nl9GZRfh1tq7xrZq2TMPaN7/XD79veWLFb27+VTet1zkF7k1U+uWejcnMbRUl/iuVgP0DEA2JoJ4C6ozAMR7eqMLsbWSJWOowDKeJ8AYosAUqEb0EUEAXwFUrrjXBdtAsKgZyfqlCQABD0GtezRQg1ko60ZHwMNrgWUtcIKRBmQeclXmnXYAPB1QZpeC10vEWinFTU1wjHwXNWgvDQ+0gOwCySJY2DrDsFEVGs6LwFRT8x07CIo084jXc8hkK2MfXUWcshCEMO4m2tWs9UEeQ7AL1w/z6RyUQZ6UYAZvAJORAQg3g1wtAcQ+PggK/QJR7MYZU+dRx9EqDeb+n1dwHHloWaBpgtdb6kLhy8IYvfaegiuR2o71tDCtQRZjOg8rI9UM9JhcUXGrA+ZMA++DySOykD3+gwyubFW8awDHXW93kv0g+unZah/Tu37NaDCSJ/RtVbAukqBsmUMUIE5rN1WVR+uQrQGgdqxABQkKie6Tneh+WJeA8oGmPv77iCovTsltnsO0ou+DsZL829Jp7pzjRyey+FAabRl7AgizQjYV1sQ0J490EMmgExOe6X3F8eQn4e0e77SseUccCFNULeG1uTeBMDrQLpoFXA2IO3+kOn91U40K+JCJmI2hWxbAV1nDrD/AuN2Ce3PUzjYPOBBKitqRHaXen974ISNr0FtvQSnrgKZCZC/cUlrb3xFxiawhtxA10EJDNJVIHaLB1C+cgFftQEM48fqLDRjnYMq6MyNuzp/SQg6XWY26qth3wetrl3owlzsq4meJ7qHZ5BpHnhKwVEH7rEYsIp5pAfZuK/vpH4A5J3ABF/G6qBC8tQuQAPK0RDKy1Vlr04TfU/FSp/jBEqP4eQ3Zex0S9fb7GH9W+86dJMB+/ck0fXRXUEnFfBu7YFjYbTnLrxUxsqpBmaty3ov5YYGB4uqBmu9BWSfoeEmHwERI6g29DrqXhAl4+mhfl/ogGxUrED6BpD57v7/2fvTWNvWszwbfEY/+3bN1e5m7bOPz3FPF2xsmo+KEpOUylEqoFKMgLIKwkdkEAnhBwFbYBFyhBQ5UaQP158AP5AVpYGCIkAiUmUIoUsMGNycdp/drnau2Xejrx/GPnvv+3Iw7Ir98WkNybL367nmHOMd7/u8T3M/953qWisbk0cHIDNG16WD9ITXatow/yFx1r5p1JaB55sCo2h/S0txB101DruQ5t2G2m52oU7JaAsyQ3Nd3MU+EBjGwNYN0V3d140266jVdCJI949UviFaaqQ/AKLDItB7vg0q0zXI+EQFpPHvgi7XnhobP4KozdXoKQfVauD8s06ha+j5KXAFgRhTCXpPNfer9f6GakiXQEoaAqFaBmCgDFi4u8DJsljq/TVCNf71hhrNEniVlkMoFT57KGOeTqktXM1SPQDNwHOQUTAzG8YgaZJq2YPkFWJgr87XgLUDfrNoqvth2da18Op9vW8PkNs9EBUdNyDLOtQ1vYrUnnSADuQuZIEXhMOE8u22C0HYVD935gGbe6J2p3ukazq/r2ur8mW615OZOjQBYIGICj4I9H3EwIZ9el/nZQsY4+elas54B+pk5yHUX+8C5xZkhl0Qob4OXbvXWvq7LzvgJC70vNhv6PqDBkKr+LdlbAyExlf6GjgKg7f3hQmIXDpIT3jVRg3zw9cMhwOGam9Pp/kpAKblDXWGxhtdeLfLiYzdOdHUdHugB0ALwG8bByj/ZxpxT6Z64B2sgBtpTzcaOSABNRJU1MgVrhrhs1Cft9jVXVUH+ZHCUxBpMNBOvjUAwWvAKbSEiMU3/d2oo5mhRqzWZjiALNpVnYNsBIDslpYUiwv9XAayHV0gtzRg165vAxEj4NMCAGl7TTWQcQjs7pB1rQHIvdqC8gsQh6eeOi4ZCIWuASPRAJ0uMzNoTrUV3ONiCpkIOECp8biT630Pa8/KmDcEzqktxZRsIMO4gIxlNgbx0UyfI9aYzkaOrqOgAO6cCbSzQ0fYCXAZBQ2dvz501FWf0b3emGvWa0SSPzNdH+2VlqGiM2Bx7EOzTqS/Oz/WxXoMKbiJKeYqgIzjMoXyJgQ0LWiPj65DA8++vssldIC9CpmcC5CNae/qKp+sdU6DXbVj0xGUQSO955cvdE4H0aPPsV4D6yRclw7SE143W9sWhq8tNh/ap/d7eriFVXVUAmgD3YKIe7ClqcZ70HqZQMdKmuthbqAAfT7SpXExBoZScBgGQFpZAWLHOUQd7fFEf8N0kx4BX0p0rMY1GmhGyjL93fEDILwcqOGbrsDRm01krFaFqKimh10admRsstR7dsBhyIA80oP6e9rVd96Cg8gAz7O+olnIkxbU+A3odUFJvQAQfpBqJDwFuYBiR9dVcUtLI3Ng9Q4gm+J09R3trjRIuTcH5LCZrR2ghmhD6t4D/T7A5BjwjN3baBZjDeDc0NWDJznvyFgAnYo5YN5G0BZXK0GoGbRLVrm+z1qi5dElkFHWE7VZPTgEw6Z+rphAtvMYuknbgHkbQYYbmNZfgez9U31dRxsou0N/jKXbmmVpL4HWBDKqfqn7tdvRZ0tCwOl19azJhmp3urGuSbeh+3AG+nFdoIVpbeB9AEnvwvSsqYf6bM0+YB9XanfuPlatiLP/STxIl9ejV+Wp1KKH2Gl7W/oSLza6QPegmwdJ4foa3c2BJn0H2GtXEGmWEF6fA2h519QwVwBkvGMgdLnU571dgEzBUg1Lr3yLjOXQ/nGtAq2hu/q898YadXhgbPYgHeDEmsavNvVAqANhI4l99qECX+3p57rg/Pk+dBBOdCyH0mhKrfUobgoZGkfXVUcrlDYqNVsxA4WIDrDwzgDgfRs6q9Z/BKSprt5MBjI0/lwdMyoLNul9ABeZmdkaOm3yc3UiNgmUFeGg/eRC13l9Ww+KwbHOzdwFgV4gZ/VyMPmefl8NsIo5HL7OFPCVpUb/E1OnrgWHdOro385m+u6eed0NGTsA2EIKWMXJsTaqvAw4sU2pDOo3IAtU+pp9TqAp5W6gDsMh8AJR1+PGVxtTaeu7JGC0xQpU7410TQ4zdcxqvt7fxtM17kEloZzrvSwgsUb2vYRy5DSHzs+PQ6kMSueVpx4NfMrVZZv/F+Xa3mpapfbawg9SqPG3QN06U0OwASMOSSCrDnRBjTKNpGPI2qygw6S3hrLbuS7uAURttVj/NgU9ui3g06kD54z50MXm3ZaxC5AVGUIHV17q3LeBxn7i6KavpnrPW9A9F/RAJHOsztW8VNB8fq7rIPC0btFoqbH2t9X4Z8Chkg3IGdJ1sEjB2b0HRGxQituAyPD8vgJuXwGOnAy0k0bgPFeh22oGLfhzeB/9in5fE0oFZ0cTHSsYr1AHor7Ktq63EuZmAxQNWwXQO9wCrNK2Zp+iXDN4JTQFLECvLITMRsPXZ14WHf3dGmg7TnX9QpOorR0NzGaZ2oQGUGGcTkGQdEuzAlsllMSe1vVh0GzyUqZgeDCLdh0igbyr7/yNxO11oZCCaVf3dT6H5wAWeVqpM6ANSTZABwDyQ5OmOirLFJpmAIC+3VOHMAfnb2eptnILOvQyyNRtQdDevq6LbfkYMeYquARpf1Eutx6Z+5CD1FrqonUdjXaCvi7aQQKEdFDOsEwXVAc6uFZAxGiBGpsMAM+Dp9V4jSAaW+V6QDWgevB0B+QzAIOQj9UoLd+geCN3rkt3AKt5CtmTAurvAYB9pfPBzD657sjY/I81O/a0qcM1uALinDWdrGlPs3f1kbITt9b6uUlL76+A1uTpAsqg0KU0AYDsvTONwDeB/q23AemCQjNN6UjLL+5AD8oFCLJm0BUXhrqGJqUa1xlE7zFkcfolcNqYWXUbcDqJOoBAAG4hZATWDT207s9hz02gbXtb77EHXUk90Jqr1EDvLYWM9Jj2q/7GXlsdgUWg37dxNQC5CtQEYaoTeAMwQwHwDJ1f1X3oD3UdNUEC4y3AS/fHkHh4CQgl64Bbix8AYeOOOnUrAOZ7gPEjvdVprPfS9qATEjypalP3zXQO2ayROu1VX23RNhxn1yog2wOZRK+rmbpmCIkBIORcvwySRBeP7rf1ivf049elg/SEV7fXtWr9tQ1XQpdIE9SywxBKbIUu5MUC2Ii7AMQDanY/AaMEXCsl8NUYCDAeVPX7Fhfq6XuDj8nYsqJOTlTqvPgdIJmba3o+z9QRDaCjpg5SFBkwILcAmlVtQes0CArX36B/XHfVORjHaujHICHgHOshMcr1Xl5YqEELrkKIO1YnbAiO3gwkXWoQk5YT6EgCKY+tumILipp+Lo70OTLgtMrqQEYJUKoVBAthCtHsAjIOQG9hwAhsZnZ0Doa4prQS5ulJUS/0mQm3Ugl1HsIt/VxjDSLU0El5Fuha3QOKgJbpAdICyhGrAu4HqDB8sDFbW+oEhwkI7EI2azgF7bm+zsvkRf1cB1TmcwikzqBbsH5bhmwb5uVeHZyIGjjkY3US25k6B51In2P4lD5HE6hTnIWug+0C1nQEYroAzD+AZ5uBSPscYBonI80Wuz0N+KP7IPAMJcog1PVSQBIg7TzqsGZQCaDr0kF6wiuZdMx7qFtnOwRcCHjSxBXihboAejf1+5ylOgxA+mq5qTH0gCLrzXXdLEfAGB1B+bCTaRllBfXdFrR3DuFAJqeuA1mRfA7dFSdAuritxnUQqKPijPUgqgFHUdPRUsZ8rSRuY9BY6wLZ4+6uvstyplmIcaBZyL0barzWIO40Aj2ljATpgOR0BeKt91x1IvYA0J7d1vlbQYYgOoDs3V04xFLg3AGcTRXweAbZgHwMAUkFtM9ajEGKoYTlAJ4qh2Co0gH6iYn+hrOjzugqgTY9aI+vASoYsNw2AkLEFPAjuz0AHkNDRh2yp05Fn4PIpj3gHivASSyBk+0BtMfPavrAZ4lmQKmhdn2ua8ut6bPVQOR5uwqdwRUg8/ShXB3r/i+3dN/cBNmoiQN0JUYcZYBVXEMDSl3LaZVA56DZekbGOpCp34ckQKUFmME68LRlAMzfUTu2KtT5iUaPZq6KDPYQXJcO0hNeny5XFj0kLzCr6SYIToBfAhaejbV2X+j5aZUtXTw3QerBoDvAC/V3/yjTzdwGZ2gDzpXXhoX8KY0cNlV1Ihq5RuALwBvEoK0FdFOWtvWegxMgbGwAISK0d/srPcTWLTW4PuCckvGhjvlA239VN+oCUNAB1OkNNMxyyMpdvarPe20XulMydXKOp5oS71+AcndFHWr/zXoo5iDIOjnT54AEl1Xv6lprN6B1GpzdOeDxWtCNcwLA660Q1p+ZTUI9ALoQqETAl5SeA0C5MpExf6yZ1y6IosXglHThMHfhb6cbsB1QunSABb2SqD05csD566shqyYabIyAL6kCZfxsV3/DqevejMD1yRIAN+vPWu0G0DsstRxMEhgp4HRGwGUUpBCYebr/fXBeVsAz1AIOOv8KlPZAjC3OofoBztUu8Jb1QcjY21N70oQsUA+0nxJ7g4wt+jrPzgQ6W03vJW08ei8OlNfpunSQnvDaryVWqb822SlsPuAqNCcA1tIRyAqAvtgzUUfGKmtNU9VAvHEE3jXqfcLGpW7nGgjdzq+DEngBEUaon/MycOqausHXFTU2DQewGSD2OyvVGg729LA7czRt3B2rUY+mExm7DozMRaCfO72jk1/PoYPwCvBmAQ9SBiR98YU6IEkPsBmJZhwycIoL0PNqNfSec+hwqu8dylgfrNDwjv7t+svV4I5XwIc1AWV74A6zGuUNQCRTE39mZgaawtaa6/rYOgecyU1dl8u5zmGrAaB7dyJjAZCGtnLNbFgFiCdBid33IL1T0fd+J1HjtuVA9rmqunBFV+flBgh77wEQ/ALoNs6BVsLtwDr39Xc30I1biUGUdQAYLiDaXbhQij+G7i9gXj+rawapC/dc+JBdhMxQM9TncBt6f0/5OlaF82zdVSen64LzUgGhcFhXeV9/N8kUvN6sglQTSI3UAZbScB59v8sAWmzhunSQnvBy06q5D+EWEugsGEOppguGrwKotgNXHYsdKJ1NgEwxmelCdgG46TX04DkFZGlken/U6uz0IVMCZQF/o5t+6kNZINGNVgU81BpayK0Fwp4Qyc2Aa2l7A4alo5s5qKtRT+DZcsB/7UF79mIDmAto+MsAc1UFyuh6TccK4AqZ5roOZj394SvQ3ZcbYGCqAOolMDfguhrAwt1KQHLC10PndF87A91AW7GHgEvqrDQj5YeQxjWzDfBVTZv6nWdv1jHwMc3qulanDV1b3S6Q/AGQvAmCpB5kSl7ZhmAI2rubEJgdAo/P0IFOPked1m6mz+HmwCwNNCkbAK9fySAjBQS1NUedoaAPB3dT92E2I7JSyORsFGtzel3560Jo6nnGV3xastZ78V3dXxF0GvYAc9kFeEMPRHwNmKp7pdq2eB/s9lTtSbunzxFmIGFV6Pet55+WsdJVF2YJskJl9dE5KP3LEtsX5VpVR1Y8pEMVnr5VPrPp6kK5VdWN0YCupG2Ixj5dVVbVhwVzP/d9LRDTdNQANaEEYBlgn+AAbQa60E6ApdQBMOcKsAX+hRqvOaScwTaYQWo1yrU0NVurcVhBatpJCCQBQGFoNY0ACxSZOlxTXw/zZJ+IDnW9hAk4a5uJft9a37lXBUqJnjpS7gaU3lNdLzGkus0DMV2A8wS7On81AGTHCXTU1L9GxnYhI+JAtN2MtaPOb0P7cwUEj80s2SiIdw0dQ46jjlSrC4dCqov6wRSyp6Bh5qa6tvSYNRu09W8bK33vVQh8Wh2g0Qh1DqogNArnMTogJZTTU8BwNaCsPS81+xcD75MD7ft+UzORDjgWPgD7195ExlYJYG020LQA720OgfJVX9/RvcWhjB0Cs3wTSrcZlIdndT1/DhbqbARQ2qsDW3wNuKWcC11D92vAGQf+Swjl4XlCHYTQ2Tp6NCPlQBcvXZcO0hNebq9jbuM1Y1fxbslnSnAi0hnQ5++Caj3U0BtrNV6Tii6KixmUl1IFKBagB3SSabS+BWnPQaFGqQpOxBHo0WW3dAPND0BMd6V/e/wpyI5BO/AaHLMWdD54wES+AbxMABifLuy1IeCDirG+36KhuDPnAbRYl/rempkazRK2NHED3odMSX0ERg6wBdUDPRQT4EYaE80E4ERKeG+jJXAHwf5wN38sY42FHrxroKjogRCsF+qc5qFmcc3MorY6PrsJ6KnBO/Gmuo46QOnRCBQDVoJmnldXhyseA7eXryD0oqHrrQYlp4up/q1b0Xd8sKOHbw/asSdQKhwDT1MJkjVJW21RuND724ds1lFdM/BBCmXGFLpEDyGDdAKkpuBgdvZ1rlrADl0/VYMyb6qdfepAf9eBMtQJMJY3IXNVmaqzNolIKByaTcaa3Vl0NdNcWeh5dmWp+/oUEPxOAdpukKWagxzM+DEZmjU3psp16SA94dWqlVatv3YYekBS1z0GWZECwMMgJ1GudUHl0PLp1qCbZwE16lQ/N3N1QdWbExkLIXKYLvVzr8A9r4Bhze+CBMsYWpMXwKcDMhH5GCLh22r4llu6+bZMf+Me1KmnpxCxAIYrnAI7LKhg18/1ME/AoV4D0WEImZzqSCO+ZVvX1bCiBvwaSM4YgP8bJyB0uaOHzk5TD7YZzFUy0nnuAMasPtFMzhCydwWIc1bhwM8BfB21QSsOcFhmZuOavs/7QArZnum67PU6+oXgMNSAhyoD2oBkoSUdL9IDNAy/QsZubm7r94HsTKcN4GsgmY0X0EHRg8zVEOAIQCvxDGQEMyCF3d2DDjjQZ9uK9X0GBazzNwCQ/khB2hmUg6wOIGgg2oXkky0P1XG/Dq31CVAiVBqQpYJgfDLUs2ED2cp8T3+3Drp6rw61hL0FmKYmlDyLuTry5z6URlv6vA6U+xttte/1xqM2YQmZQLouHaQnvALbPFI4WS30JaZAOJivOvpdjmZtmqaRks31+5apMi0fQZlnBhIH9TWIrea6gZIlHIxnaqgqoRqW3g3duNlSP7cEssxGpL+7D6XCNXBbRLv6uTtQV78ABykD7EMI3SkxiCM6K6jJQ9fUCLJKQQwsxhBZZ5AVSQDQmsQQtZVAFOmrs7Gc6vtY7YCCex8A8kDtUBsD/itWxzuF97aeA89VXQ/jt2wr1uPa018uY1egJHtsmgGefj4zCQfUW0I9KAj4/YVeSidr1oDShQtdOadw39sggTMxlffZNgB4f4HX79/5TzL28VuflDEX6AVugI+e1XQWZkvdc5s/1veRQedT1dOy+yrSdZT+PujggQNdgzV4BgD+s2PdS1ugTXYdqEkWkA0s4P3OAWoxG+g5UGlA58FGg7A2NKDkVWCvDzV7Z0toEgIca0EdppCFLFc61gh1/mb5RMYiO3zk34ldYpC+KFdamPkPrSsXwHTrAMoFQGUadDUKjFPdfBs4LHcTbaNPNmocWudaa0/bymWUTTVab0Hk4PUB2Ffp6PflekgkoFGV9TTFsAXdeFOgIfAiAA9OoAsw0YzZK8DM3Yr1c/cg9RtXIPIKoL14oX/rQ2bjGJjN9yDg2TI19PcgC9GpqxFZQVkr8fXAqoLmXc17vYxNwdEbNDWir1fVKaYSTxd4i44BSNta6N/+/lKd8fuv/HsZe+uuzt/r69ptVbeOjJmZfRxEXv8I+K++HP/6C7uAd9gM+GXo0p1uFoBTuA1jT3K97fq7ZOxP4JbPJ8oOb0e6X9cJAPbhYEyAY6F5VW3vAJo00o3O1swjzKXuh2ahv9Ht6Zu7ugdA6yMQZQZtsmJX7ecKzpUaMKdeP9PJr9dBrqYDmohASrw+AYerop9LiLAVxLQ92MOZHoU27Ojc35pDptnvyNjOY2SZCZT66Lp0kJ7wmgwz22xeW+TLKZSNAC5ZiTU1uBkD02qknxuEuhjvV9Qw16CMsngjMFQea/dHCFTv4wl0tkBKfJPr36YjMGgRgKChG2d0Ajinhm7SvqPPO2/p951napTyqn5uVurzFs9PZMyHrsKopY7tAZCkxdv6bG+D88o/0Pe75elzbN9WLIAL95dfU2coHqljEVc00iwu1FC1AQNXdAFYDp2BV6Es9YYtYOZ+LAo0M/ude5qZWC2Uzdp7pSNjfwilr2lvImNvug4cVGb2ZZC16UNWzyA7aXB4/P/7gmrml+x6y56W9v7kBJDbsA/vwllW3ei767ahO+0VEJc91O9rQ3DQAWLMGDrv1h5AI0Bj7UZVMWvFvtpeZ67ZmAZkn4M6UH8Ab5EH9mR0Sx2pByRPBhiuLroN0NLvaxA7gYaANnRq1oAN+w40zdR7etPrJQiZtx89p9ZAw0DXpYP0hFe727fqQ1iLFtDEOzm0RUMLdP9UD6gpCBze7mp0Enb1YHQSyBYBNiaKdIPXmiCwGwDAFhTqLYG26ECjxU6sh8QEdMPiGA6YUCOHV1eahk5inedsH0Dzns6VN9PUfrALPE1AfjYDKYRNayJjb+9BGeSK8qA0QMKmt1Hg9s2vUhK3o40e5Cl0eiwgAxcSSeIVwIm9qt/nQak1aUF0DIb5v/x3jd53u38gYw3dWrYPchqbLvAbgcL8OPm4jN26A5kOM/NBmfxTAx3cjkFrLoLyjU1kLAb6iaXpexoDDGZ0rl16zhAkMABEF0CXUxxBWQs4tnIorVZAAd7P9fuGoWaur7c0U5cCuWVZ6D2PbkIzAsj2pPtaGu0CjcMeOMXNUJ2hRaLPsYCmj95cM/9/mKotas2hcxSqEA0IiieA8YlXen9LyPIBDZLVA91LTahWQPXQJt6bZWxm+i5zoCkaAA7Th6YPki4p54+uobKgQEavSwfpCa/1IrTyoZR3HdLf8ZWJjKXQYdYGwFkn1FzjBDpt9qAMsITOkQhEKB1olVxAKnQFLdB5Au3TpeIXrnh6IJwB+28bJETMVyNXcdWpy4AEb+qCQwhOSbx5Rf+2A7IY0IZcvaPOaQ0ELJeero2XgOAzONMMyMDUoF3rH+r9XVHj8LpCD/jSVSNct78qYwYA5ZXp+3jlym0Z+2+v6kHpA3mcA3ImhwAOb5b6fevloYxdONBq7ytVxH6mnytAKsSaDOhcAibKm2pk/ip0Kq5Bf6hz8JT+Bjg+JRCEtgDzsrMHHEVTvZcT0NELcyDbA26qCEhXl3u617O52oRkrbatmuq+rpa6pvM+zCmQ4FZAOSCDfbNzrgfmoqtj8VIDuP9+oi8p9aAU5wNbPwSxS2hyGUNmbddXx3uR6vsIWypT5PT1Xa67L8uYOwfdzy3QSavoO5oWWsbrLzTodCe6NqKO7gWbg5wRdNluIECvtR+1O47L7PiPX5cO0hNernNkrvPaZM+ANTsFToxeoUY3aenBcw7AtPYcIvO1bqAo0w1egMhrY1szUlWIIJ99SvEZg0gjryU4Pn/4R78jYwaZtaSuzkafuitAzNTZhw4zvRVb6b41577O/W6qEdp9EC4t26DqvtITqwkCoKOVHtyzqyCw21fjegasuYsX1BkaQrv9LrQwx0ByOBqpwe3UdQ2tSYMQIv8dIGgbXNc2+mB8W8buz9TwRalm9A6r+i6rpc7VPFXnOah2ZGyTsCyBH+vh2wKx23t1/R0PZB1e+v+qw1AC4/kCQMv9I+WDCpqaGYq7Otd+quv3BOa1ChIzDSjfrh+AcDZgVM5h31SO9HMLR0tONQjCdiGoSyPQFwNx3osWOCrQCh4nt2XM65CGGXCeuXroJ0sgJt1Rh2vjgbTKFdANBBHaZlX3XLYCnrZz0Has6vtogBMWNHQvuEFHxkogQ63vANmrqeB5awAZMxc620zneeex5oQlZEPpunSQnvAqys/857NXOVRnw23pInsZ2hjrwChqFV0UaQtoA1xNp3sz8JJhM/sA52xDGe8MIr5PjnQh+yA/Ujb0OQ6y52XMKQAzcA0Qe8DFcfRp7UBa5sC/NAF5lAgya8dqMCpAsWBQV193dANuQfZuG/Toju9B2fKclLv1Y72uPm+YgOPdgQhypmtjCp1aixgkE2rqtH/ZtjrAyxowoN9Rj/Virg5cbQZ4MqAcSN+qAcmgpg6rU+g7unuhk7r4PLa06uu7m29BKekcVMhN13TRUTuxhKyNa5o5mDsagCQ1wK20QMx0AQEciJ6uQfh1vVIeqm6gz3Hi6vy/DgKQFOgiepDUa4W61xt7kMUATM5oos5LdEedjSYIdi8gwxXuQJdtC/QoofN2Bc7QAtBjN0F4OIVqRWoQrAGX3m4DSD8B5zRHulEIVOB325D1ngBWqWIa/L0Fysip+yYZK6A8lxTADzd59HPFDJqL4Lp0kJ7wmlWuWfxQ3beSAGtxWzfpNrBSrwvNJuxmeigEbSjjQbTjticylp+AtlOpXWz3bhEbrjpmEWgJlbAhzw2ijoVGqWmmB8wK2vcDYMNew2GyBG4pH1phwwyizwa0A1cARwAcJc8CXUHX0b89B12j63AgFCEYUmAifnAfJGKaIM651AOhFWqGsBJqtmIOJI7JDMg8IePjQov64kLfW6UD5eGVrsllFSLmT+uBdRoo1qPTVwMeQXeft6FDwmwGgq4XJ+pQFvCecgAUO2t1+uu+zkPXFHuWuzr/MRCsegs9fHdAJX0CB7Lf0XmtArnoGpjbQ6IXAEX1PWBQX6da/j7ydK486JRtnWi5OgKi2OyKbrrKUO+ltQ/vLYHOZSjF+eUfytgAAr2nIRu7PNI9Mgb+pRZI73gNdXadugbFNxq6r2eFnl0Dl6hEoGMtUmzWdcCsLjINkB4Uuk6TUhUkCiinuyHcS+/R8mHuf2FMkZcO0hNetWbDKs2HHCSIlKL7eihMSx3bhpJJqwXaRC/p4VZNoaMBhEuLQg+8JhjrDWBPpmvNUmUr/b7OWg3pAHiBJqAllmYKjCSD4QE9/Qnw+a1NS4ALwLxcaavB2L2q83IEEcsaOls+AUy6z8L3Tdea3Rku9Z3PgcuoCviAZaafi2bEIq0ligBKHi1PP7fMoc3XB22nUtfpi7qELNnoGsoczWCuYmBKB4drA4f7UwB8nYCAbQcyiSMoaZuZtSGbsA2/XQJItgZMxrOKlgb6d+GZF+ogGfDzNCEQIIXd+QQI/YAXqHFbHYFpTw9VINe2aA1rBqSGTs8UB1PuAz/PWgOu+oU6UrNA5y+cQqPFETSH9HXtVwt1aFqpOnr3ILu42IJ2Mih/9yqaRatCU8rrJ5p5aYLd8YBE2D3R93tS1zW0aoOmE5BvtiK9l1sGGpCmNuEYggDq+wzAbvuunnFVU8xV47F5cYFKga4vqYP0Yz/2Y/bBD37wkbFnn33Wnn9eSy+fvf7Fv/gX9uEPf9ju3r1rW1tb9i3f8i323HPPWeVPd+Vzzz1nP//zP2/PP/+8VatVe+c732k/+ZM/ac8++6yZmY1GI/vRH/1R+0//6T/Z3bt3bTAY2N/+23/bfvzHf9zatBj+jMtbH5n3ULmsvtEU4sjXU+Gqo4txEUNK975u3N4JsKn11BHIoFxQAu9OBK3ra2CbfiXTTRpBxDIb6ecCTzuzUhA9pFTYXVfHGkAnT1HB+q5utaKrRuRBCcDIiT5HzVNjmDTVySHF6xdf0fsLgYNmDc6GgbjsaF/H6pCtnFyAHh1044CvZl85UK/z69+grclxEzQD76kx3IKsTQFyC9mJOk3xWMfau7run9lWZyg+0XuZnKlkytmJToJXsoPkVQBnAkY87ukzb178Mv3CmQKP/7imB89VALoTmXO/qWWeXcCK1LZ0rE1s7hEIjULZuAC80V0gDbx3T5/XPH3e6qvqTI7aut6mJ3rP1R3gbtuHYy9RxycgIC/oC97taVakBtIlZ6D/Vduos3HHNNO0FepaS/u6ryMIGGq52oneVf2+DbCJb+ea3UlKXWxHQNmyjR2YYNuAQ63R1Hu5NdfnuFvXNbTzCuArH8PKLgHwTdeXPIP0pje9yX7913/9c//2wZv87PWRj3zEfuiHfsh++qd/2t75znfaiy++aO9973vNcRz70Ic+ZGZmv/Ebv2Hve9/77Ku/+qstyzL74R/+YXvXu95ln/rUp6xer9vR0ZEdHR3ZP/tn/8ze+MY32p07d+x7vud77OjoyP7dv/t3f+77nw/vPiIGOlpP5DN1Tw+jTwCL7NUKYBUm6ume1HVB5SPIKgHeI0k0ejoBEOkcxAer0CVyAv2Yjgft4qZO73XoA70PRJsGxI5raK0tR9By3NW5au8CTUIfgMyJGodTIBUJWxBFOzov/iFIsEw7MtaF8ri7o8/2AFJmsxVE213dU1Ng654Aqeb0WOfqE0eKn+neBEHX4P8iQ+NTNWjFxW393QAicEDXNz6pf3v0CQWqu1UNDIqZHjDEeDR2uIttCPQOlfqhjG3OQXOwppmSCLBsT0M2ofNWZc1vu2/Xvw3V2bsHeLRjkJ144Uw/NwPnr7MHXZgRlPumwL+20j3SDJSEtHxKbdHeCho3gFbNnWt2dzjW99bzgEqgoTbmDjTcHLYhQNrV9fbVkECCypQlUCocAqi4nOgh79Qh4ALH9v5Mg4M4ASmUmgYgZaB2Ymuh68CDTt4ugOtbgDd6Acq0HeA32s7g/nb0HZ09xvHmAv6Qri+5g+T7vu3uaqRD12//9m/b137t19q3fuu3mpnZ4eGhvec977Hf+73f+9xnfu3Xfu2Rv/nZn/1Z297eto997GP2Dd/wDfbmN7/Z/v2/f41V9+bNm/YTP/ET9m3f9m2WZdn/0EGja/uiYZWH0qTZAroS6nowDhw9UO44cICC1lS60gOqAriVyZl+XwvYjYeg4xSDblhR1UU7BjoJPwKBXQDO3QG9NwOK+eWF/m0LOvQceHdNaOndDPRvKwmQBoL0S1DTaLGdQ9YAMrhFTY1wb6CO7eII9Io2ik9r7ChocZXpD1+A+jsBIxsA4L830KxNChQBt27LkEUJlGTH0H3o65rMAkjPL/T+hnDuxl0w9Kk+xzUgQ200QboASmRmZp8GMDJ0Y5sLXVMGUgdVwBHNgHiyeUfvJ7hQr/q/A1ZxO1MXsNrQeTgByZrgTPf/8BZoz3n6jqe5ls7PoRuq8CGrBEB8Fxj3G0N4Ty3NDLkztRNDALT7UwC0A0bqhQKkXyBQ3jT1Xm7sKDYu3ahUTjPS8+IEaC8yyNRdgY7f047ukTLQ9ZecAxgeNAjPAZfUBMyq1T4hQ2tHv28Ftry6AwLlAEvxoQrhnD/63hzIltH1JXeQXnrpJdvf37dKpWLveMc77LnnnrNr1yAaNbN3vvOd9nM/93P2+7//+/a2t73Nbt26Zb/yK79i3/7t3/55v386/Yyh7fWg//6hz7Rarf+hcxTHscXxa57ubPaZze16iXnea5FQAWDf4EV4Gdd0YzwFHRwBaEPddwGTc6rZnSqk3Y8eAL8upINXI8iUALlgH7pELhJNcfo9AOKCE59CVNleg6NS0XmOInAcHY2ero2BpDPQMsoaDJ/Fiv/YtIDEEbq6/Ff1cAoKNRg1yEwYAIIfTHQN5R39XNfX9ecDE/kYVOKLVNfktAJyECFE9BAF9s+0PBQGen+bhc6pU9W1cRVKMpttXQfjYygZHekh8XxP5y+psJZa+2X9bLYBTFRVn68KZcWwr2XoWQKZA+BqCgbq2OWAu7pl6mhvwAlzIABxobNtXmgXa8XVNdP0Nau3BD7Z/Jo6KtdArPpspXbn1NNsgrvSHxkAadQaus6ammy3+hwA2dBN2trV7NOxA8EfMMtvg7zU2QSIGNd6Dvi+ngPPOzpXyQyoMNa6Tu8DrvPKSvdmY1ft8QWUI6OROokxdPzGEGhYrHNVCfSDPWi06D376N+GpJoNl1OWJdQ0vjjXr/7qr9pisbBnn33Wjo+P7YMf/KA9ePDAPvGJT1gTlI/NzP7lv/yX9oM/+INWlqVlWWbf8z3fYx/+8Ifxs0VR2N/6W3/LJpOJ/dZv/RZ+Zjgc2ld91VfZt33bt9lP/MRPfN57JbyUmdmPfvDvWeWh9E0EUWlUh6wNpFbLEhTvZ1rimCYaDftQo+72NV09TjXKWh8p99A9IJlcUVcHgFKzTA/fdge6g4D51l/ppgpBv8vf0sOk9pRaqhr0aPdyYG4FWYFaonN6AsK0I2jL3YfuvgQEYuegKh1Aqauo6P2lFRCwdNTQu9D6X1Z1rmagAbeqqmM7HEK6MgOSTtCja7b0+xrn4BRP1ZCuUl2TxZauqx0o3dZdPXRqW/ocM9CFcoEM1cxsGyLkRajvZAI4sxsulH4A6nSy0FKID4DdAFJpLmhrpSCLcdZUJ8JJQN0eRK0r0E2WHus7acJanQMeJXchqqvpc1QXmiklOoAVyAoVQA+96qrDmgJO52pDX9IVHyAFDnBsubo3R6Ze2NZVnZeioufhApymiwlkXohttAsNMi6UyaA7dS/Vz80LdSMKgBm0wo6MBU29P+qeK4BHbm9X91b/rq7deO/Rd7mcLe1dh9/0ueTI57u+pA7S49dkMrHr16/bhz70IfvO7/xO+f8/+tGP2t/9u3/X/sk/+Sf29re/3V5++WX7/u//fvt7f+/v2Qc+8AH5/N//+3/ffvVXf9V+67d+y66AfMNsNrO//tf/uvV6PfulX/olC4LP71VSBunq1av2A//P/6tF1df+brnoyN+6/kTG6sDFU3Y0Db0N7y46UKfp7lSj5ruf1ENhAVgMG4I8CoBue3VgoAZW3yqAZNuAS0q6+rk3gn7PYV//tgplmQCwSiVwy2ziiYytTvV5E2g1TUE9dBXre6uZRpA5SLNnICQ7BYHYEu5vdKYHm9/RLFURaRkvzYD8EMjolnA4BU1NYd+GkuK1Qt/v3rZmfFajZ/V3m/q3/bGuDR+yY7eArbcR6xpfgR7aXqRBxTu+BpTPzSwAHcKXfld/ZxNoe3IAjuIUOvzW25qNLSe39YNQwj6f6gE1Bud7XtF5qIHT34cD6n6kh+U+BDlOHTrHIEBaLYEktQrIsMpEhgLo4AqAvb4LxQQflH2b0MiwWqsBOGgCOzTQvWSQGE560CxxAbayqp+rV3XTNUsdW4MMSAEkjjFkcj1X13MIGKRaA9jTTyDYgCM2qIEu2rlyfTUoAzfX9bKp6f3VH6O8WC1W9v/42v/7n+kgfclLbA9fnU7HnnnmGXv5ZQUwmpl94AMfsG//9m+37/qu7zIzs7e85S22XC7tu7/7u+1HfuRHzHVfW0Tf+73fa7/8y79sv/mbv4nO0Xw+t7/xN/6GNZtN+4Vf+IX/oXNkZhZFkUURLPoyN+8hUdN1TUtiTRD3XOx1ZGwDgOfmSg32AbQoxkD2mB1q2/DTLfXWt94KoRfxwYCBDOGAupdB2zYYryZQIgRttV6ghGIXwGibTjRKHUBmIyPujLYapRXgdI6merA9GGnXiXOiz+bDEstdBR7XQOuo0dQ11GuBxhc4SKtMO6auNvRA9V3NVgQVfb8XHT0knIkaqtzVzMutQo3hHMo+0VD5a26DbMwC9mQDAGBzKAVXoGtv0tb38ZvHLCzbGOieu/Z/0vX7OuCXWZzr+xxsg2DyvYn+bU1/49WVOq1DR/dhBDACypTOexpwjVwtj3q+Oj7zHeiQgtJKVtEgormlTmdQqOdYI66bvv6tH6tty+pQlgVsZhuyMUVF7cRJpu/32kptzAoaZJxE7y861ncZQykp3oOyNiSLFgu1J4OBfp8LWE/CXBHe9STVeTm4ovZ4CkFJ95au51WoUIY01N9oluqYVcDfmi0eDTQ2S1Lm1et/Vw7SYrGwV1555fNiilar1SNOkJmZ533mBX42EVaWpX3f932f/cIv/IJ99KMftRs3lBNhNpvZN33TN1kURfZLv/RLn6MI+ItcxxcTCyoPTeMQoo4DwAyd6eJxYzUYkwP9vjv3dMGnW4cydpCqc1AFBu/JWjfGsoCOulvQgt/Xg2wObfTNEYCgC33e5B5glYB4MpuqEa7BAerB3w7aaqioHDEL1dkAyhPbgRbXBay79EJ/l0jrzud6sC1P9XmLGCLNWzqnfdAhI/LNCCKvAFp6X4GyqoEMQA+yEOtP6WHSe0aDijDRua8E+rd+qYddP9W1OwGW8AD0spYjnb9pwg5S87Zm8F6E2rkLHXntKmTDQKJnnkCbOpTJcpCo34Kxkw5oBGbqDPU2EHABzUewo8+bjvTZmsDZlXoqrNwAaZAmTb8PLOFL0E70QV9srXtuslT80m2AIwR9oJCAbNHttdrKg3Ci3wet8C7oasYRCJmP9X0cAJayCw5NBPpxeaTg8G6m8xf31FF+ioJiwMo1dLvaeg/A+hd6Jju5PlsZaYZ1Ck7xovfoO9qEX1jh7EvqIP3gD/6gvfvd77br16/b0dGR/eiP/qh5nmfvec97zMzsO77jO+zg4MCee+45MzN797vfbR/60IfsK77iKz5XYvvABz5g7373uz/nKL3vfe+zj3zkI/aLv/iL1mw27eTkMxPYbretWq3abDazd73rXbZareznfu7nbDabfQ5wPRgMPvc9X/B1GplFrx0ENYj0fd23dg6kd0VVX+x6qdHTvQ60/i6AqXqjm+p56Gg4A0oIJ9PnuDrVuYEOZqsd6+euDPR32wcdGcum+jnPB+4WSJPPofMuhqjDTfTAmt37pIytU41OJnXoxoOOCGcDEiegDbJsdmSsGuvYGjIEGZB5em11SpYAlmwAjiCoaKmwmYChB1b0NRyKwTVdHPvQxLD+lP5u4Os6CLsgkrl8i4zdBIf/fArSCoXyOc26sF6OuM3/tAlRPWQnvJk6/YsxYIbqoDUF2nBW1bkZAai64ugmeQrEWy3Q+yuBImQz1UOwaGg2MW7pvewM9J3kMwD7gnJACnsubEAGIND1salp9SAHItsoVkel0wMwNzDzb+AYrQ50/o58/Y3WHOSMruv7nQHQuhVrEHECRLZUDRhkSqpZC9UuukDZ6B/replV1cn2oCSeex29v5namADwUAXox60CwFKtQD+u9ujcexD40fUldZDu379v73nPe+zi4sIGg4F93dd9nf3u7/6uDQafefF37959JGP0/ve/3xzHsfe///324MEDGwwG9u53v/sRcPVnAdvf+I3f+Mhv/czP/Iy9973vtT/4gz/4HC3A008//chnXn31VTs8PPxzPUN+UTX3IYB0vNSFcgKqy7uQDk5CXSj5iS6AfSCetIEa5ritkdJuAiKPB7qQr4zV8CWv13t+cwKK4bFiXqZwqNaXkNnYAM1+TzezM1Vjs9HHtcEJzJULHE8AUHavQgvHSqPKaKZdl76v6yAZQ9ZmDhE9iK0aZGOoTdEhzBB0p603wBk10q64UU/fRwvGTk4h8v+43l9SU0PfAZ20RqT37IJUS+SAziFw2hS7wFnk6MHrOZBV8pgoErDX5s1AAgO6g5LXa/bJS4Ao1nS/Djzdczt1oIYAgsB+rE7Thel6a1f0HYNOsxW+zn/UBtqLVzTrMHY1gNss9He3YE03IYgYQ8dfXOj37Xtq2xaxrsHzjZZbVx5gs0AGCBgg7BhsVgZUDAaqCC3o4FpDuZRK4ulSA5BNU3F/56GuyRFgJHdGt2XM1vp9axBQr8AZRwSVzgDeJQQGi1Qd4Ks13ddHj+E0NskX5iD97wqk/Zfpms1m1m637X/9Z++z8CF+oAyAZDXISl0DSYgwB62utn7uAmQ2ait1kHzADDlgHFqRLhY30VRyUFGDe25AiAgEi8FGn83bghLRQg+jJIG5gkhpNtcxao/tgARGuE0st6BQ3VCjNO/q86ZHOi+rlb6PMZDC9Xr6vAGUCucwRo5UCdmiEFTnMx/0txw1VMNcf3er0OcoAIviNSB6H+ta84BNvASh1U6o7+0IuJYaAEC/04AuO/CJWznxGJkdA/fLHmjw+b5mqoKroGvW0TXTOezIWAEZrQvA2rWB1yo70e+j/bUEzEu+gOzJWO0O6a65wCxfzfXoqQPlSLHUQ3q4VNqAAg7zbk3HVkBX0grUEd30dV6uALfXbUAe+6H+bgW4llYOZEoWulbXQFeyDc0SayjZJSXwB+UaRDiQpQ5qujebUwiAl2rfW12dv3VTP9fIoGFppfMcOED2Co0SG4CqbD2ml7debuwfftM/+csF0v7LeL1ydmrBQyW2QaFpzzgAICOAWfah/t6KdFH0IYqZOfq73aou7hnQ3Z8Xen/dXDfpDJyh/Z4eZKvoDTK2AGB5cWsiY9NSDaTnA6keOJOupwd8DZywFRigLAQNqEgPfR+yYykISVpXD5PKRu/vBmgYTedq5ObATtyt6ufKc3XQx0AAFwAuJrinn5tCqaVeha4u4G5pnEIrtvqSlm40g7Fc6oEF3IW2BNmIegQda0C61Yfv26krpsF1FE9mZjbIoSzTARoDaHc2KMFEUEqOgdRwehWi+lwj6SVkhqyre71Gih8Xes9HITh6W3rA14Eo0l3oIehVNdOUzHTfhNt6WD410LV6D+gnLjJ1BFqg7fipjq7fw5XO350QtM72tRQPvoGFkClZ5foc/r5ukiZ0nc7BnrShdNT3dQ4uPF0HAXRRn06gYWoX4BIAwg9Bp68V6fudrXXPVQu1i0WhCzWFhowc2NPvZI9u9nj5l0CL7f8I19n62LyH0rjrhW76NoBL6zf1kH5loWno2mYiYx0Q8gRxZvs0KXL3daN1a/q7d4EJdp8EcVdqmBtN3fTnDY1iLkL13MNYN64PjOArAH2vgJBvHWhnVrTSDTRdqeHbrk5krDlRQ5ADn076qpYFTx09PGtn6hD6C43kPCCyOwfSuhIoAhqebnPHg44VQMO2XX1vq0IN+JScgDowfVN2sQNCq51Pydg80a7MbcjYtoHYcd7QufeBHyox3atRm5PstSaUfgCY2ikmMtaogkMDfDrWgO4qKENtTOfregOA/YnaHe8pdbgWBzoWL/T7chALBZo2awNosLbSEnuxAwzqUFpdg4DwAajWg79l/pYOvh3KS22gHNnEuv8LKD2GVc1iZMCn1QOqEw/A+nXowly7wIMGGWkf2oCrsNdroAXa6EDJ7gJ46aCcltzVOZ0B1GIMDSO1MWg2Aph3BXxYG6hv+o9BXxLAM9F16SA94dWyvvkPlTXmu7rIStBYSgqoMwOOqO3/FRlbp7AjRzrmdzXiqwOHhYGjstPWTZBBye7uWKO77kKNfwJEYg7ovRUtIAhz9F6cnkYdYxDnrVPLPHQQufc0UzerT2Rs04ISKojptjqHMtaeAqX+tv7umb428+eKabJQnZcYsjs58M304ZDYEMDzVOfPG6gTUU3VyLVBYyk/BNqFtjp/8xeBQTpS5ypu6D4a1p6WsX7ZkbElOKI3a3poH59QKdOseq733QG+n/UW8GmlExlrAD6jf6xZjAVkclttPfAcIGL0Q30+B3BOS+jMvAnZzjHQd0SZrsEqdHNcJJBVAgplHzBIHmh6VSFL3YaOPwMppFas3xcCj9Skoe+oB2WeBEg141ApW/wL1aj0Et1zWQDO0AQ6IYFraZOrQWlA0Fk0gIA4UAf4PAS6l211Ja4BR9ESFAY2D2CN1zVzFUL2OYYHdiFj7oWPriEv+0vQxfZ/hCvwx49IlDwN2K+wrgfKMAdW5Qm8WEfTin6gC8ptaHr0CjBaZz11XjYOqNafqHHYVNUZqgLvzmSjzkse62buznQOpiAxMZnrc3QB19XdBecKZDEu1iC2+pQeYi6w4TZAdHMJGZp7uWIkkkKfYzeF7pRU7yUGoPC8AC4YIJ6rA1FcSeRx0EUFEASrQQZuBXxYyzVkU14Fvax9CAzeqhmf7lAN/XgCnZ9Vnec1ZAPrgNfaROpcVSErZ2YWlPrMp4E6Bw0QBo6gGSG/r5nhwa5+bg+65yoVKF2Aw3sB+KDcbssYiauM7VDG9g3wdwPIdgD/TesZErXW974517G3dnSdD6A9/uwY1huA5gvAYdY9dSZvuPob2VM691mmz3tnrut8DvfiAlN65B7K2PIa2FnIYO6Fau+KmTo+47ludsqS7gX6zrsjKAWXGvwlpa6NWhuwsgtw2guAUEC8X2aQQX4MCpKAE0XXpYP0hFfbuWbBQ2n+OrSkjjPdpJVQo88BMHA5oNWzSlXN2y50s2RXABQ40ch8A+TaEXBd2IUeUKAjaU2o8V+4SvyV93VjdEq9vwExPEN7cQhU1fVSI6W70Fp7z4VuN6BOeLFQB+5gpN+33wXdqi4At4/1bwPA8/gVKAECuV1BXYDgYLpNNerNsd7LpKqYqww01rr7ei8x0BBAE6BNhsAj1dC98NJadevCnq6DK+Bkn091TjeQudqqqnHdaeocmJmtIbE0gexOSeWHhR5Q/Qgyw6CS7nhaunCB0TpMobkBAjivDsBo08BnF9jha6Y2gQhWQ8g6luA0xQTwbun3vTDVNfhionxaUQ9KRFCWaaT6vL1Ind0K6NZtwTsfA9P/BLJtVZBCmUPpvBcBP1yiQXEMzlXhqB3LwHn2rkBZtaPPO/u0PtuDALCAW/p967vqdFoMvEohZA0P1TG7WKpto+JZe/2oQxR/Yf7RpYP0pJcbmj0M00hr+nr2gIF2ZeAMzaDlE4ji2i3F1SSQxbgYqrFpb+lmTl5QB+QceJDyfej0AtzEHPBvy1dB2LNQZ4g4e1wQYBwOddPnE/1dL1DjujWFLUTZol1goAUW7npVfyMHMrrOWo1cFEELOBjDxULXRgy6VQ3gw0kq+n4bwPRrwM1VhcyVH+vY6A6QDR5OdAy4vvYaAPoGYGn9UN8HCRm34dGsoe/39FwdjeNM98yDC9CXMbMVsNLvVnVvzjZKiNiCjsF5C8oeps5ZC7rO6vc1q3w91FPAaYNjvFQakqxUZ2jjkdyFDNkQHJ824Go8cK4WUMaHSpfNdvWgnYEDMjrTtdX2dQ1eBSG3O0DsOr+lczB5Xm8w29KAy6mr/ax2NLC90dT1NvKUgiMGNvGp+qHmJfrO6zX4jQi4tKCb2e0BpAAwjdUMzrOWft8CAOi3oLrQBnxguAt2ca1/29961HZsQKOTrksH6QkvZ3FmzkMlkV0gCHRhwWcg5LmuaeRQuUIRgd5HBXgd1qCjM4PUYjQA/agLPWW8WH84BoN2+scAkmvD4VuD5QcO19EZ6JpBVN4BwrtKrptqDdJOnqubahJrlLo8gKgcsCgGzmmawzxf0/SdS7w72xq1pVM1aLfBONTrWqY9TvV320C5G3p64Peuq5Grg9hvA/iXCl/f0bWmrvEt6BzbTEDi24cxEIyNQFdvd6AH9DUAkcbnEPWa2csQhY4TDYb6UFrZgPiwe65rxgGW/5ULnXKFOsEvAQvyHrBhX+Q6N3XgN3NAqNVAoqcKAUMLgpxTyDCemIJzwxo4dXB0Faaf2+uoY3G61HX+W69oA0AE4ObdfSjBwmE+TnReGj3gLcvUGL0MnV5NcED8pjpwPVeDiGWpjtQpNNIcAkh7DozlTcA5nXZ0H67uQ9bwis7BtYXaxaKj6/583ZGxDLjDypbey8h51G7HOWDT4Lp0kJ7wuhZuWfRQOjDd19XTBIfGB4LAIWQEKoUa7CrwgkAW1TbQpdM4Bt0lAEZPoR04hJLTOFFjfRf0sZqQ2Wil+hz5EBS+QXk6DtSZPD3Tg8xf6fNmbQA37gGPD7TRb0FNcQJZvgZ0BhZ7iv/qtQETAnO6ACPcrOgceMBEPIWOnyq0x1cvQMKipvcyOgE+IvDar+wDN08XANmAm7ARGP8Sng2SO7UNzB+kOkJw6p6GdMXZFrPr56nO/yDVbrKpo4eqB5mhdaJOoVfVsoI/VQfkHOgnIuCcmaTEtKyZtLiujnHqAS8YlGUb9/Q3Xh3ovIaJrsF++DEZ29R1zaTgNC2g/HWaAwt3pllH39VMzgLA3H+UaZn3+kj3+tN9wOSdQPa5VGeeKEzKHZVCAYUTa0Fy3IN6ErX+j6HTEPpA7AKCkh1H5yq9oXNfA56wUQP48Mhp90G3EvgUdiDDevzYT+TlpYP0RbnK9q6VlddWUQi6Rsuqvoyipym+dtiRMW+omyqA9v01HGQBsHAvtvSwPLrQ8kO9r0tj8apq9YQQLbZhgxcVaBFeQkZlASldIFgMu/q3zpaWCpJcDVVnX583XGpGxcuAT6cGvE/QgpunKl0yWXVkbJjq+7hqemAFBBQj6YKOOmH7gTpXKbSKX0CXmL/W333dPpR42hqBn4z1N0oofzWprAINBqOafi66AAZpKFGW0AEzBsHKZaJG3QHCSzOz1HRuOv5XyFgX+IhehQPgItGOpvka6tUEBIeManWl6+gOsMgDi5dlEz0Zt9WHsMZNENi9gMAMSFzLjWZ3Smh7L3Y0A5pA+XCvUMdnUXy5jFkDsnwDbao4f1XveQCYUOf16syPoLkhvgUZae9tMpaDs1bAHvGgWcKDbGXNAY4i4Llbe7dlzEn0b7d8depWY8jUQxkZm+s3msndqUA3KPj2s7E6z62qOuM79ui+XpMyAVyXDtITXjd6G6tWX4skX1hClHUEqsZNIPmrAk+OB2RvwEbcneqrHFeBlRr0dlrn6qzNmvobOSjAOxHod0GEMS11QY5Boy7sa9ah5eumH2b6fd0BKIZDC/4KjKsPrdxTKG+UiTrAuamx2UC3YA/Kh9ECOF4CKKsegHYfCC4O5roOZgt1IsquzssAKBGWhTr3dxY6f2mhIPxapFHvKWRO74GDWevpPW9B6cwDPFQDFNeLqzp/nZGu8fM2cJaBAKiZWW2ha2GxBqAriB6/CVjQzwBAPT4HgG2qhwcoUdgtoHfIl7o+hkB78eaWOtWfhuaGxlgP/atgO4j0vd7Ve1nDs1VO9P52QYds1tT1cR/mZTVRZ6gCivebGyCZAgS/fqCHdHAGXaJQ1hoDLr/Sh6y3Aw+y1PMirUIpGTQg72TqnEaBenUkxB2X+mwulAXrgF+qqrkzDyodOUi/hDNda9dNbf7adA01H+v49ctLosgvylWtbFmt+pqB/+prelgmUDqbb9Ri+KEalqxUg+2Fmikp4UApZmrYt8CHPwY27Fu3gOQLuo2CLfVy5pCadh2N7ra3FFvUgRbmTazL9OqJHqozECntVwGg3AJCyS39jZ3hRMamEI1FNd1snVy/zwdcQjPVOZgHoGQPqekMyq/z+UTGiACy5QPLeqFllQykX3LQ6RqmaiB3U82IdLvP6L2kWh5akeO4BfW0fR1b+bp26w/gcKKsBoigTs7V4JqZtT1dCynwSwXABG+5GvsSDtrrsTraGaTDbsfQnQp4SB+c6hBEqF9c699GwH/j3VInbBhpRiBeA1RgrfP65QPAxl3X76sAQ/YA2ruvB2p7b4NY7cs7EEyarsEDADy7ps7Gxbbu4ZXpWt0Fp9jd6DwTl94Kyoz7S11D64napz5gW4OVOlxzcIATT99RfQXrrwGs6JHOaQjzTOXDErL3fl0z9RWAKKSPNaqkAHuh69JBesJrFi4tfah3tmG6ULZDNXLkSZ8AWDV29ABtlJpVWr+kG8MH5ekigR+GNL4HHQPtmy/I2AIOxmpM7MYajbkBcC1BC9zagHEbjEMVWH178UsytoRIJAA+og2UNysHakhLkD2ogPxIOoaMRaTvDc4wmwGz+cABjA9ok0XOm2Vsp6IGtwuaYZ1rCppNIEOzAfD/OZBHulNdGz2Q5+lBsFBMNNweR7quGuCPpOAQ1rf1njeuroOtGqD6zSxtQvCyBF0ukJOYg1xQBRzZUwdoqSe6thwoK9aGWpo6cnXPeRt1VKog25PPQO0dUlc1APY+1day1l/9W++VMR+4jC7u/qaMFWPAYYJw8TZg3raBOfwrTctatyEzPE41iHhxrHY776udqENjTulDYAucVt0GyNoUukdG9gcyFkWgqwnPm4Bj0TdVBLgA6gQH8HwpZLNJxXe2UIPXB6zcGrJZOdBgWKi2LYonj/5dzNxmj1+XYrV/weuzYrU/8M//rkUPtS7vRaCj1dCDLAUUfQxdBAVkAp1cD6hFWw8jfwbgN1fHtg8UP5Lf0lPmOFbHLCz0eSeuGvUxkPf1piBICnjdfKXPNgP8Bwm1dqG9+KIHhwmIZEJjGzqdCTjFRROwCj3QJjuFDMFGD+kuHJ77r1fDd/0c1OjrAMg+1jkN9kArijqmgo6MRT09eKvAtbKELNrxWDMie5DGDzpqDCulHkTH4FRcg8aBBpACTUo9FGGqzMxsQGJu0MiwXKuJdYGWY88HxXbolLs31DXYAFX4EZCvr8/+RMbu+pD1CnWua6nOoQ8lnQy6yZKqrn2IcWwbHPxWW3/j2lWlTrgCQq0DwGZaFRzexpfpmCkg+7+darZovoKSYhVA0I6WnIO67psbEFDPY+hiNRB0BYLKKohpAy+mTaDDlCBwFVf3SD1VhysE6pQM1hooWNkih0C5osFaN78tY6sjALkHj2bvlvOl/a03/p1Lsdr/2dcyu7D0ITzMnRhYUGcaxQBdkkXwNkYVNUoLiO6iI42us5Yu+NMCVLWPNLI5d3TVHkDEbclE7+9Vvb89eLgppLXbQOxYg5b0vQs9BFctNeoXdDDOgPivqmVLv6W4GqAtMg/wAdMzNZDtY+jaSzRDGFc12j7vAb8RyLyUTTX+X+Pq+rt4nc59moLjCK3YteptGTNo417EEOWDSOSzFcCsRfq5CXD43ITuzb8C9N/3Af+VxIpF2a1D2W0H8v1mFoKOXhXKwQ4QmCaZPt+DY2i0iPXdNWuQeT0HyYW6rg8SVm1AOXhRKu6PSPlmI+jM0rPcGjlACqBt+6iiczWEAMRA088d6Jze3ugCcSFoatc/LWMeEF42dtQ5uAY8XuFK1+Ckpfe3C2z4J3OgRIn0XbrgsC58wN+cKbwhg5LzdhOCkl3Q2oOzwRmrgx501fHIody8cPWeb2T6u6fQ9NHxQC8P8EW1x5oYnM0lBumLcgXVroUPZZBc0D+aVPQQ/DS45jeW0A0FacVq2pGxYUXr2/OZGlIHorvbgLuorHRpjDb6uw7YrgyU56dH+n0VINrzAp2De7AxKgDm7u5BeQP0tqJpR8bcOaAlaxBZAIi3chtIDYEV2duCyKamhiCCer57Dp1BTS2JOcBiflSAgjvgNRzQU8rHekgknt5zuII5gPZ9p6IReKsF2ay5trwPQezzIgOdw5k6OfsVPYiOPHVIzo40Em7fZybtYFvnYVTovCalOq0eGPGE9AUBL1Nfgv7ZDV37z8R6mF8Ag3pjqWWji3v6fcuazkPP1KGp5ergN3oa5FR3NSgJ4o6MmWnWqwMcYMclNA+sVXXgGZj7ABy4s6reS3lb/9bfAv4qKLunUI68G+hYmev3FSe6zmMoaxctnftbieJ0roCmpIEgdgmZoRV01ywjXZPhK+rEtvdAy9LXtTGFDtNGTwOaYqFn3H5X39H5Y+s0g0wlXZcO0hNeZ2HDgui1l+6DkxOCt1rbAXmFIy1hDddAtgdMXc0m8JYc6WGeN/VQCGM9QIeuHtJDwOl0oCxQBep9A3Vw6wGhF3STJTVgDp9BlwMQcnqTjv7GY/VoM7N2ATgCiO6iFbTMNzQizaoAKM70XpKhznNYV+Nw85q+t8MFCGJCl40BWNKFw8TugzN+qnUawu4URD0BJeMqcOkAxZOtlzqn84beX8vX8lU8VnD4nzQ7+iMrnb89UAyfeIBzMLPijmZU7ji6ZhYNdey8Dchx5PrudqATNQKyR7ure2S609G/haxDMFb6julMD6N5qY7slX0tG/UXQN4JmojzTLFF61z/tjnSQ3W60jW4NdPn6A00C/xqXztv9xZqx67AWrVAbeUM+MMqgQZX1VS/7/wC2LUTfb8L4DKq7gJlw1id2Gsg2HteAYxkrIG8Cw6w42h2rDPSteuBe+FAtcIxEN2GZp05CBlHua6r+KQjY8VjYukF4J7ounSQnvB6ZmfXKg/llO+/AppoHaCiPwMtIeAKGgDnTLLQTfoqREUHb9BDZj3SVG1ObdYZ8e7oUOroAt3p6QcnwBIOjPW2dQhp8rHWnldn6kyO55omX6XQebcB6ZeZGv9UA1xLAEFdMc12+ACq3gfsWL6vB+oYMC8PAKezruvheQidWWtfD7EuAC1BBs/yQz101uAMLW/p/SVwoMbACZQeASncFhi+I9A5BPbv+x/XvZU31NFL2zp23lanabVWZ8bMLJvpIdjr6D22gRQy6Wj2b3Sq33drpo0Rmy19UU8nz8rY4lW1E08d6Fr9clgLO9A9m8L+d9r6HFcP1d51G/pO7k6A6yoD7qZQbeoElAO2gPW92Gj2LnL0vYd7HRk7j9Q5iEBCyJ3IkKXQUPDgXNdBPwMAdQMcFReaG450vayhFP/8EgwtQDxSD2SoQg30XGgSahe6ONpgJ1qFnkkj0N/zQl3jzQ3IMgEFxLihtmh782iAnoLYOV2XDtITXtdmG6tlrx2aPgBip0s9pG+CTs3MV+dlfqQLfrXRQ7W/AzIb53rwRJA5WJYTGatBFsMt9HMVD5yrXI3S+r46FsdTNRgbKhUCQ+4yAAbqvoLNm0AA2QGepiDV543H6qw1gZyx01SD4eX6HDEAgC/0PDAPREFnUGo9h+hpCIDD+kIPmCkIdtZ9/dsI5LLngEsqKqALVwcJgWM1fPenIE1zDBgk0J5avqTs6fEMgpQRMDlf10xCB1ilG4FiOMzMlld1Hh4AZ1e60nmIIG3WBZt9e6D7a2+kIN6LTDM+8QqyekMNuNpXdV12ofy1B87QYQOIXeFUOano/moaZNaHIK2yBjt2T21MAdis56/o3L9lCoSSaz1UoVfCEliDrSqwSA8hg9wBPE+ga5VwXfMZkGX2gSpipnspmmvmKqur/dyU6jW1YgD5Adh8Cd2fIVCOvFroOZDTvTxQXFcCGprJjga7ByvIKj92PGbZF+b6XDpIT3jtdHpWfyiDFDd0Su++DGleYE67AdmONIKykaOZg3iikeuioQ6IBzIlAaTxgwd6L0WuB+i0A62/QNjWAdzUYFsN7uvepMyy/RtqrMeJHkR/ePu3ZWx5oXPQXAAge6CGYNWGNtpcN98yh7LliW76GeA/QkhhT8FgRMdqmEddfbbyVc3azBsdGRuDEdmDVPwFpOznwJTsefobu6Awn7n63k5TNXx1IBZsAQFcOdX1l9YUS+G19NlK6GJ7BbKL1oVSi5k1IEsfXOgzJxX9zqQKHX4LPWSAjcHuhmpPqtAuXoPGiAwYvMMCHPwESBKHOq+fONY28HBbn62+UbuzzPQlN5pAWnlN7VMGGJ9squ9zB8pa+UrlYAIosXngDEWO7q8xHKOxqcO6DV1dQ2iMmAMv1V4BfF8jtdGgsmEeiPNWTqEMGgB9TEcd0UYA+xrEzU/qukEiR+8lgtblZQsgAKHaRXekG+SlrTfo3z5WGl0F8PxwXTpIT3itzs0ezhB6NYjQroPWGWSG7gJTdWukhn2ag3gmpDjdWDdz7mrU4c31njc18OpBMydc6/fNJmqUFhBl1XM1wr9/63dk7Mv6+rvXD9Vg/JXbmi36b/Ab1tFU/Aqik1mp780pNMOQhVDm6aiRqwLu5D5klVrgiGbAH9KLNXp//TM696ORGvo1aGhdgL6dQfnVA+2+NYBmbyUaHbumvxE0dQ68HMDrQDa4gWxWx/R9bAb6Pobnuj8yYDGPgefGzGxzRR2GAvZwkWt2ohLqfvAaej8baLcHdR+bmAYbrbdAQ8arQAp7pCWn/pGu812QKWqDrmHyx7ouw1K/L9mBQ3Cp64OaPnLAnrWGen8nmc79DkhgOED6aeBUuyHYVKBdSKAp4BQEk7MZ8FwB4PkEStOtltr8MCEVA/3bs41mcjshCGeD83IKxK4T2NcxiG6XQBXjX4EOR5g/Ayd71tb9v+gqtu36Y52BpQNrD65LB+kJr6h2bpXaQ1psgLvYB+dlBaW4JdCHLjw9FBoAHowd6CyANlpnrRvS39F7iaBd2QCMWAJfQWNxW8YWAG7e7KpxHRyp9Y//o2KL7Ou/Wob2QCByMFXDd3KixnoOdfq5D7xPkDaYngFgfKzp/voQ9N521AA1oPRQz/X7xqYlouMLzbJsoFtrDFIjGbAO73t6fyl0aloFWs+BaC8C8rhd6JQ7diYytrqia9wrvkrGqoEa8HwDwK5rOgeLqhrmTcRM2q8zvZ91C/igoPTjQpfYFMgAnZnOzc0DjerzIxV5Pf4TxSXt7el+rW30PU1AJ9GF8nJK9bQWNIz4egiGI5C2gcwBaDJbUajj+GIK6RN47WEA+3UFrP5nwC0V6PoYJvq5LqjGtlK15adAEQK9MOZBd99yBiBo4LnzINAD7WY7B666NpTdkrXumw50W853QHYHmiB2lmq3PQ+ClxzKeMAn2IC5CqqPLqIARL3punSQnvBKlq75DzlFNV+95ppBCyRENsfQ/DUExfuFaQYp34BOVapGrgklwGyiNeW8o5s574BMAYDdhmtgpS6A9flEF/dyoFG5gR7Yf/jd/5eMJRBBZqC3lQHmKqtrBL4NQWUVBDu7oRr6ylt0rDNUB2QMkeFmpPc32dJ5efM26C5VlTV7Dgf5XXByTk9BQuRID47tnv7uSaxGs9/SOdgyXVfTuR4wO3V1/voA8GwM1HkhEdl5HdrlG9DYAHI6Hnf52ylkNpsr3a/jNRB1NtRp9aD76ypoJx4/UKfJgxJ20NBg6FPg6zU8XQvXt/SdNKE8QiK0Z6neczWBEhvod6VgswxoPhLo5A2hNTxMda3GA5DygQaZCThru6k6Zv2N3t/JXdjrdQgwIWO5JGoHILKdhMBp56mNdgw0AwGQfQ349aD50E6nICW10Q8+kwBXXUWd8WSh+wiw+hbWdfGGc/X0/KaWfZePZWLXC2AphevSQXrCq9auPoJBSoDr4gSYViuQNvYLXbT5iRqMDmzwGKQGdoE40YOMQBswL3mhTtOImJEhVTt9nT7v4p6ueOdE52rT1tPozpVDGXvqWB2uYlcdgdMudMCAFErFVVzHeksP2sVY38dWCBscWlInLR0LIRJOoGT31L4avq+AjqsoUq/Oz6FD7yW9lztzfbbeAMp9YPheNwA8BMzzAhiBg7b+LTGHRzOItuFwHz3Qw7h6AEzpYxChhYDEbUz0c2bmpvD3gOPKWrqOLqDdeQ9I/l6ALHA7UnxLuwuR/lzL0JVEM7mZq8/xSg4lIoj+fcDk2EYP7gxwZtvA2dVNFIA+DPTdNYGEtLZ7KGMLTx38AKSGYk9tarkCMk/oJksbOqdxAAz+4PjUIGvoLfWAzxKdq2YX0kAP9Nk2QLexBOzT7RicBiAiDzK1EwEg2u9CAOz21LkvVtDFBuSqxR5gC2tqt4s/AAqDxyRi1mugEofr0kF6wsu1hrkPpTC9qi7GHnAZJS3oEkm0iy15gy74t+0CNxIcFGlLjc3TIHtyezqRsaMT/d0DyMvOQY66cqKGvg6A2HOYq4sj3RgXwDD86YoapUGuY29wNRPRBuZm0i5MF4AnAUD7WzoKChwXgNOJJjJ2YPqOetd0W774ih4wf7TW590/UAfzqboa/92OHmzPALh2Ogd5CUjjW13XcxVa8BMfhH2hy9N2df7mHV1/bVcPk2hP12Sw1GwW0SSMIYPhgdK7mdncBUFSYBkevqpr+qZ229t0rc6tC5mXeAWO+1KdoS2Y1jrwAy4h8BlCE4mBc+tW4UeaoON2BxjBQZ4m9l6WsbOeYpoGwMjcDD4uY85a16rb0uz9Ako66RLYuqGkmBQaqPj39Xf3+zqnJydaDaidgcwGAO6PR0AoudGAsGoanC483devTmTIsmNdMHXAJc631fEJlh0ZS9f63toXhNcCoDpo5yRAJRAXkB1/zJanEMDSdekgPeE17nmPdK55jnasRF1IG9/RBZC1ddH2gPX55Zf15TZA4NDp60b7b2CAgoUequEa6sI1fbbeQI1D1QcunkQzUgd1PUxCwMtkG90Yo3Odg/hCN/MEMCH7sMGdDsgP3HyrjBmwNMdwfj61q4fdItf2YnetDus80Jbeg3foQek/gDZpoIAYA2tsY19LivURkODdV6O+Cr9Cf9fRA2EMPDf5DoC0AWjtgkhz4apj8CLw9XQhOlwFunZzwNlYVX/j/C5E6ma2AbqIJrQ7v6EGuKbnwSWvQUYVunRCIDC1rr6n+wHoMy4hw1XVeSBx1AtShQf5jA7UZaZNfY57I7U7bqj7q/a8HqrzBhAOtnQdlUv92xI6s5wCuIIAPhBsoJwObNNZV526MRBeuhXd6wvATV6AHIzrTeD7ZMgWCXjjQLpqXXUs6hW1CS6U7LaAeqIGGeQpdOjdvQZZ5ak+78ePIfAu1E5Ebcjsuo+tU/fSQfqiXOXSrHwIwX98BgfAth4Uy7oeyM6LaviCLTVeWU0N33oFzK3nGl3HECEHEE1EUA6aA/nZbKKRwxIMZGcBqvXQAWMHQExmQFo3gMjrf1EnbMvR59ib3dafPfw/y9iNm2qo7jzQZ/uTF3TsLbC1HkD2bnSkmJykoRmpqAkA9LpGhu26zsEMwNeffKDR8bKuzzsBR8/vqaE6murYBFppr81AY8l0Xhxozw7d23ovHcAbAW7nzgIY0EHZ3t3o5wh7Z2bWDiBbBH3WIyjbDSPIwuW6znevgNo7CH6uMnWuwo7e3wzUbwdNnS8P9M/aByDAPNV9HUGWpdfQ70sKddJJ6/BiF8qHp7r/rwFuze+oYzwHvcegDSKvwM+1gnKaVXQNOnO1CQ7g4Dag01mBTsi9QG10O9B1dX4GDUHAURa6ei/BRN/vrFBHz/EBFgCaUzNgAl5c1XvpAWa1DyXKAvyeuw8g2B3qXDmPvd8MiEbpunSQnvAaZ45tstcclhDwCtESokDIYvhXNBWfgp6VzYBRGFKX564ebiMg6trZ6OFWgKffByboCpTJ7o3hkKh0ZOxvfNnXy5iN1GH4nQvt0Bn0NAoMT6E8d/8lGQverB1wrW19H//941rfPoeDY7urTuLv3AFw7Z2O3t8W0BAAB1X8J0qC6YOGXrj7xzI2B2DpJNO/vbu5JWMBYB8y4PrZd3StDXb0fdQhM5EvdP4qID+wcUHENwBOsBqQVsIcVKDFaQ0En/maHaTAAV4mAKtWoAzVAxwXYJtt0AKPYaZOcARmIol0XlsNOMx9XfuTUJ2DnVz35hAwSOlA1/QJkFYOIDuWAzgc6JIsGOq8fHqq+3Wn35GxKkjHEPnuwoF1uYBW+CoQp8JYCjqYJa3pmtq2JNRs2wyc8fYNHZs5wLg91ncebesCbKfEH6bPNgbmmVGg7/IAgt0AiDGXFdgMvgZ1XlvfByhJ2fgxEEWOoAr4yS/oU5fX572qF2urPWRAz3b1AK1eQLToaibHmXdkLAUjt8h18eQb/Y31aqK/W6qhOttSY9NNdGN8CsDmt6eQlvV1g9dvaRr6X93/iIytjxSHtdLA0DptLcUtofMBNCjtl1/4RRnr/Sc4aIH8zKkACU0C8hQ9PeD3ACDfBQ6VCIjYXMgCpYcTGduKtYut1pEhqwE2o97WrE2xrwfglCj/oVq131Sn7hT+tt8A2ZO+HlgxgJi3l+BIlXogLOF91EGd3mlq1FtCidfMrHIL5C4gys3y23qPYJ83QLCaznSdB6B0Hi/1C0vozIxDddayid5LFACTOTijy2Noc0pApijTZxtCd28EjRsGYOl8T/8W5NnMLnSu9qCcvrynwcGsovv1HBzWa6/qWl0BVYcPBKbmKuThPATiWchcJTDPt8FurydqGH2Y+20K2lcdGSsOdA7ITtgMSssLkPy5qu98Pdd1WgMM7L6vUkgpdB83h486z2mZmSo26nXpID3hNQ9cyx6q9e+M1UFyVrCZfeDn2egrG4MxDEHM0AW3eZ5BdxV0LzSOdBn8NyA1a670d9tv0N/Yvaen5ehAD6j1Hylfyt2busHbKQC8hxqBlyXIHnhAzlYCp9BMPakKAG4r0LnUandkbHeqf9s+hG7Bha6X1IdtCazeEbTlz3Zuy5jjwryQFhPV5YF4rtfTuZoCDut8DWSPoCO2gt9IIuAJmoLEAbSyNyH63PH1nQfAMbYFWluzYUfGzMxmsT7LGA6ykacHowt8ZNvRl8vYMXR/OQmUZXxdWzFI4ITwjiPgkomLjowtQUw7NzjISg0izlOd19pKHV6/AhlL6HbLU3DSl/reV8A2fwz2M4WuvQxE1py1BnqntdfJmAF42ChZHEF3JTDudz8FWa+qpm1mHX1H5UL3YZHqOn2pARp6QAobAmN5egqR6ACycg907jc14G4DsswVlBR74MIksJ6HxaP3nBeXJbYvyvXy+Miih9paSyAD6wNPTgbdBj7gLnzQ4Grva4RcbulY61SzMZ2OOirFTBfLNnSnPUxn8Nmrmilgd95QAzkEjpK9r1RgXwgZlelCy2Sztc7VBlqYowQcONhoBeADFsDm7DY1qixBo64OYPP4vkZ81aehdRrwopML/Y1VRefPL9Ww0OEZgOjuuqVGrgq4jvrLGi3Wd9TIeSAGO4eSnQMYuH0of72hqd83zlTI+AS4qpZAq7GpqhMWLADkvoH6lZntAYNhdtTRMWC0jgoNkO4DLqK/DazgwCg+DUBYFbIE6VL/9ghoFgADbclKT/gw1nfyEjhNAWSBDDLSQ18bGdYZCAAD2Bx8OttUYK939XcTD/ivliDBtNY9PIbUVQFakcmevqOtjTr9TWDrnl8DZ3wOjkoOzPzAfRel+n1dKD060PyzACR4BGLEFSi711v6fsuqrsk6ZJ8gWWxRoc/hL/X9Fo9lU+NNYp/Qr9Pv+gI+c3n9D6537LpWewg0XYeOlWyohvQMgvXqOYAHgRBtCEJVdeC6sBSyNif6t0vIsrg94FABXJIDRv3+UA3G8PRFGZuuAKs007naQMtnCfpRjq8lhXWoRm4CAMCdlW76I+ia2od22w5wUG2gGyoyraHHx2/S+zPQXYoBVwOt2BGQQm6NoQ25A+n+lwA3ASWFRQX4ks5Bbbyt66pxpvd8flX3zAV0xcWlHiZFALpwV+Cdg9PpQ9brGLTi+sBBZWY2P9XfufG0ljNqdZC2ATB4paUHyjkQBC5IJb3U97lMFY8WQF9Ef9ORMZdayOGdJDVdb72Rrt+sqs7BCKppVWC090EC46IOGosj3ZsplADXrh76NWj79vaABAjkltK+Pls+08CiCoSSZVXv5R7Bb6CC1Wl2dAxKYpMqBCXAkL1oQ+CTaFbJA8LLuK1rdw9wYtOW7nW3AD1FCHbXAOZugqZhd0vthPdYRt9zLokivyhXIyusnr32gmeg3F2t6ebbyUCk8AC8daDPD0/BYVgD4HRP/zaBbh73VMdqpjiiB5CezxZ6QN1vqYF0HughswJCuQJwJn5fD/jlGHA6UJqq73VkrAnlpat7es9f09IDsApp46NjYNd9Wp8tBdC8c09J/86Af6XVAiK7tjolOXUawaGYmWYX2/u6JpNwImMpCB57uzovt0tw2kGMtAvdW5UIRIGhWyjeKIZjArxKz7o6L0BzZc9AK/sahJHNzKJcHZ9wqeXMvADh55sA2vqUOgfdSL24Ksj7tAudh/lGfxfotCwBpvBZCaUzoP5olPCeajpfwJFotarO9dKDAC5X8HWzpmtmloP+YQ26hamLzdXMxgRa6moLkIiCFvydCmg7DoFywAMmaOhYcwvNlM6q+n05yMFkgDvNt8CJzTsytg7VFm1a+s4HsdroeEvXs1Nq3mZYPC1jjbraiQiwnusQJE5KPUfrg0fXS4KANb0uHaQnvFZ+YOa/ZnzDY33ZGaTyh1Cnr8XQmQU6B0NQYiel4xCcnGSpjpQL4MsLAIIXb1QjMrinBuiluwp4vNXqyFhOIL5c72Uz1k4P378tY23QTrvS1bnf39bnKECN/kVI6c6hU65nWtO5+3F1hg6u6KEz9vVArdS0bOFApB509G+7gL/Zr+vznoD6u2VqWHwC/1aAmgC6n/ugXn4BIr4rD0RtIcCrHmupFaAU9kYoz0VQ9r0HuLPmkTokNXDgzMy22/os+UT3w6vgoNb/u5ZHC5j/EWXIgFjvfgJt1iVQFsQQXOmf2uG+rrcHp6DiDqKikyaUYLb0OZZTKMtANjZrqnOwALHlstDgalEAg/dcX8iDOnBaQfPKErr7ahN9lzFgizr6MYMqqM0NMvql4o0CkAZJuro3W8eaWc8i/ZxVIFsMjRtdR9d4DbBZcwMMLDBuN0ByxofOz1Gua6jVgDQkCNFuHmO4Tz2IFOC6dJCe8OqmhdUf6s9d70/kM7c36qiMABS4C7X2ADJDAZCp1Vt6WG5WmnYPenp/EUQ7dejgikagrbMBssJD3VS9TwLHRlc3QT+DGn8KqXNTpwk0Cu3eQn/3YqGGvglRkTXVOejH+rclZA0iYBM++6Qa/962jrVBSyyE7FOxvC1jKZB5noBKfA9kRcoe8LRA54gHh+zUVQOehVRu1mxAdaLv9wLAyeONvssyg9pDHQwzHMZDoK3w26AzBSByM7MXIGMRdw9lbDOEUqOvGbx5AMCfsa5L4Ae0rA6dYxAgAYzQ1jUdHJ7rmg72dU3PActiU8g0L6FDCryDZUfXR3IBzhA4rTnIsmzXQUS5q3suB2qSyX0g1axp1hY6+i1wQA7qNmjPXdP3W4VMqV8DewzqBACbtI3SqlnV0xIgUXMZ8meqLVpuoCMRAhWvo/ecgSO19jRz1YZsm0HH36GrkzB7TAg+AWeLrksH6QmvO+HAquFrmz+ekQwIKBhXYJMWIOsAzksHlIgBt2kD4GSJd/T7VtBKXAJRJAhy2/YeMAe7+rxnN9SRak0hTQ5YKg8wXE5TDVCtpos+gRr68ArgKzoyZE2I0I40OWZ96IaCLL7lEKWugWE4BOmHBERtnRAizRw65U507s931HgVr0L7rg86U3BgtcBoxhWwrpl+bg5g0zjT91af66TmTX2OSawZszDXudoGbyHu6h58cAr8EWZ2OgUnvTqRsXpLncc1YFTqwBUWA8nfbAsOHmIjB/D70NHsZJAAgzeAjAEqYg3oqc7HIFa7Bfilvjob8bHSVCwLLWXubHTPzYCI1UyDtQT4iHxwfFrER+TqnAZPA0QB2u1rvs4BsLMYYJYtgozvFDJNbdg3KYDm/Zn+iLOltuhmT9dpeKxzkKQ690PY/mlHzxqAYVkNiIVdgIQ1gUD32NVM4voxUH96iUH64lz+pmqB+9riDUABOr/QF9YpNe1519VozCs7MtaGQ4vUB06gMrAfAFDw6kQ/mOjhsQJSIbcJkaurZcGbAHh8AFmCpyGCD96oB8LRSH93CoryO7DEzwELMLivBuMEugq9Hf3bYksNxsDV51hP1Yj4d3WDh1BOS9aQuVrowTFf6VyNr2qJovGS3t8ZoGZrVXCkQqX3//8AsWMDyAFLABhvoP+5CoR8VZiXxQiyVKk6AQeQOZmCg7n9LGQNmhCWm1kdwKpuDM7y8lDGzrvAMpxqhnYBrMVVhY9YPQE8HzijVejqynJ1vn0ohVbnuhaSTPeICweyLfS9L0BEuYR3vOXoPQfeoYzdhP06AjmYdlPnxevCGuxBELuG0g9gBg2ywAsIIiIoL1VBMqVS1XPFgezJCppDbAGyR8AzVBkAPcsIWNZBpmgFuKlgBRxewUTGyhBIwa6pY+ZMdZ7PTDOd1ygwaDxqE5LFJVHkF+fqTczqry1eb6KbNO7pok2gNt4FvhrSuHITjVyXhRqW7QiMEvCHOJkWxzfAHufvQ1QE9L/bU8DVAAP1YV/v7xjKI+MF4ANA/yioAXgYDORfh2hxeV+dl35TjdLqFDIWQPU7r+vh29nvyFjk6BxcQOYlD/TQJzmIURe4hyLQUxsAi3EX2ovv6vsYhTq2BRnRsgpGk1jlHYhmAUvlbXSurkGFbQrdc8lEf2MbDqdOqlmDbdI4MLMpMM29CEmMzZYa9sNMHbs1AGwPqJQE7MZ5TdeCu9LMV5ZrZL4w7foJxiBFkUFjBLTHz/dICkV/dw9KbE6h67wKJRMHiD/zOhB3Qnl0k0G5r633t4IymdfSvbnnK9VJtNH3UUnVCWtCJ2p3G+SWgL5jtZzI2DOOnjUryPIvb0IAXAU6C2Be78MaaoNsjwVAcnyiz7GGLpJrEOyWB3quLCEbuEg6MnZQPDqnG6odwnXpID3h5Y5r5j6kcr0KNKqPa+qAHLu6CYoMOosgMi9gU3kAoRk5ExkrgXStsgGcwwgyYQCqnsIh2IH0fAzP6/bVCN+MnpWxuxdQZgDD3GpA2jSAvCxgWZpv1Hu+f6zz3APnpQHObr/zZTLWqWhkOK0CL4gpGDk70cNutq8NAS0AuV4DFewHM0ipQNv74k1AETBRr4SMv+/q2NxRQ7peq+HzZzpXWQSdMleBmuCO3l/o67w4E72Xg4mu53PAxZmZ2Zt0j+wmShqYnujvrHZv6z0CD80oAAwYcCO5IKIKVEtWbetarZ1OZKwBOJgJdC8uEp2DjqfiqA0oadRqOteThmYnGqn+RrsFWl0g/AzTZ+uxZupckBqKEhBvBm6fKnSxNZZ6L1lT7cnGOZSxYwhoKqXey/W22tT0BnQ4QhAxgAaFuEIOsL63SfpJGUMn1tH36+3oHt5eQLMElEt92IYNoHtxU33p8+6j79cDOhm6Lh2kJ7xeXKwseqjjq4AUdjKdyFiYalsuEJTaDBiyw5WulGJXX7gLbdb9U+DxAPHb0AMG1TmAKk2N1w4cRjnw8wQgIOXX4AAFVuUa1N+HY32ODLIsBYAbPeiuGEJ77AQwJtlAD/jzlfI+DYCdfGusmY1w2ZGxUY+Arzr3pzA2aWmJrQraTjZTgxZCK3w00b8d1/Tg2EDmpQ7G0AX26jW06kLm3Gr3dL+VRPoZ6PeFcEDfr+i9tKFRwszsYK57bt7Ug+xOT+ehBDsB1WWrAzg3WEGQA/7uUR3wHkNQo6/rPU+3gNEeSFc7sJfaqVJXVEG/i3htXpeoPI3X0ncyNX22IgLBbggIBx1w1qBsmYCz24IYbFmDTKSrYHO/hNJeXddWFbKsRLsQgdTILnQQtm5pgFQ8pe/jLnCZRcA63oaMTx5NZKweqsO1geaEdlMdmt4JiBtDKW5S6h7uehrEni8ezeKuQcGArksH6QmviZ9YGLz24sI5AIVhE8xCYIy9D5xHUHtOuuDV39HFHRwC4BTKUAYtqZYBEywInIYTEI2sa2o1BU6hYjzR77sPpcddKD0CJ8v8CpzmE02TDyr6HH6mkcgBYAYCoK9Jz9VQjYDZbQoZwpNU53kTAiv1RC14BLxPO5mWppZjNZAjAIzXIGorgVKiCPR90BoqgOfq3p4e0MFC/7Z+pgfvtA5lN2CGziALFMP6W8z0ZS6GoKR+FTwXM5uCQv0qUWO/BRnVNpS/wz6wPoMMnAvrw+9CFgO6ugwEYmc76mx89Vj3TdWA4iIF/qC2vqd5fl3GVsDn5FYmMjYIdC91QSunWahj5q8g6wXkuxcNXdPVEsDhR7oPvQjsDmje+WMNLJZzBfBXgDQqhKarKnQQTgH0HYOT3fk0ZL2b+n1zyAxf6eocBIXajuON/m0fskoG72i1pWtyXarDdQBNOIu62qy95aN7eAXQBrouHaQnvLKbTXMf6oYJobV+Dt5qOdYFmj+jL21tIIQKm6C9B/psdWClvQoO11gXWThUA+kDtsNAaHC1UYPRhZbPDXTjTG4DxfwIFNLben/pfQX2NW7r/Y3eAOXDNehblXp/K1/LByVkqepLSKc3gUemoxFfBzIEfg6HMZAfxsBl4kGOvXcBbLjziYzNoLNy4etvDBp6IMRwcAxABiAyPTwtAqdiCmKuQG/RbuucVkAbLwVR4CX00K8IdGxmu9BGvwudj+fQidpI9R4LEBCuQNnTLfW9R3cA7F/Xde4e6uG2D11sBZBbLkDa5kpb57UHXbbncNDOwZx4gCmpNQCgDFIEh46u85Oe3t9gAxIYdzR48ZrAM5aqczq8AxnQJvBpAWGwDxTv/lJtYBpD5n9HcZjLiZYouyAAPN/tyFh9rs0XW6ZQgWCha60F9qkJHZglaBUuoGs3HgNfUve2jI2h9Ojcge7o1qNO2JqI2+C6dJCe8CrHsZUPebH3Gho9ZV0ApgJL68yBzoKqHrTtBage9tXjdk90U3kgnBsCr03Wg26eHLAiDY2Awio4egBALVfqbOwOgOQrBV6gBmiOnYPEyZfrRttAOa1zoAdHGKnB6I6AXgAkRKb7OvfRGoSCA838LXLYvNACbsCbFXUBv3CsTraBUzfrQx83lHgDKHlGFcg4LHVtLKYgGhso5moFHEoh4F0qFUix3waS0waAUaCj7tBRkzh+kdPxIeAzDBxFa0NmCKRjKqHeN3SLWxuwJxkESBGUursJyDoAl1Tb19RVPtdSbQhs3eB32h7Qlu96uq+dvjov2Uon4XWQiLgAPbCbrt7MMZAfxvO3yVg00kz4BeD1fQgsXHLW9GPWyHT/F8CQHy41K1Ke6p47AJuQQie0P9XfyHKgRKjoHMxHOgkZNLTMAYPo3QNOMOBQqrcA/xWTU6PftwRaHSd9bJ1CNYSuSwfpCa9hf2zBQxtzF2rj4CBbBqnG9jE4FjM1zrW6vrZwogfyEjoaWm09zNe5Zl4yU6NeL3TBx54evhOIDCukOtGEkkl5qGMdIKjM1GDUurrRBhFIfjivl7FkDrIRpYKglwM9zAvA0Fiih1gMNA5X62qUtmuari5CjXAXHWAYB995fA2ylaEeRLvHajSmkMKYAzt5DuXDa5ASTwEMG48hKu9AGRQUvjunQPewp6WMxgSEZWOYU6imZa7Os5nZy0BnsYCDpzXVQ2a7qfsmXKlhb0IXZr7SefVA484ALzeO9OD2gY/sNDyUsVYFnGoDkWxo6Qc9Z2tAWWZ1pvMS1NSAvrwFWVYoG4dAo9FrayZst6PPtqiprQzP1J5Ue7qXuj3dI2uNt2wKXacVX23REehlWk3PmjrxpYEDHEfqAKdTXX+tVO1YY6qZqw3gsNZQxSoTfd5mTdfu3UxL55tA93AAZfcUyIGTxwKkDUAl6Lp0kJ7wum4HFtlrRj+lIvAKeIs6d2Us82CzBLoAFtAV09kHUrMqlDOGb5SxINfvI1Bl3u/o5wJdjPUYUvtQripAhNa7qic8CTA6xFECyuJrAArv13R3FMBsHgc6B/VCy0EFMCr7of7GzUOdqx2QAZhCGrp09OB4GrIdxQ1oAb8DquRVvb+JLg3rAo3DEjz+edaRsUoI3hr87bqhjqgPbbjeQp/jHIDWY/tKGXP7ethV17rfhqHuy+KUsk9mF4U6SAHgZQYg63B4HSQcZnpwV3bU4ToAgsUVlOyyFBzoPXUKgXrInFKzRVlH56EDGak5lACre2oT6g4Ek7keyDNX90NjT+9lBaVCe3tHhjLI+C7OYP87+j4m0CwRj/VvkzO1vW5P91ySIKOsDPV2gZrkzisyVnOhkQa6ulYJZANj9eBiiP3WAVAngDpBI4ammb7e37QBfEnQqb0PgPEzcLKHQI1Tf+HRd+4STgWuSwfpCa90cWruw0h6eGHxdkfGCqhHr12Idib6m5VdUKiGjbsNKexkS1d8ZaM/kq2g5HQB5HE7UOqCDjgXSoDpUiPrANrU69t6f01Hy323Av3dnTM1SncW4EjtqKHvA4FeG5zYKXAAOaE6NE1QJW8cAmg+hm0JpdtgV8eiqTpI0wMwwo4eJt1MjcY50CSAv2+9HFL7b4VM2Kka+hQomh1gBK4EgIsbg5o3KLM328r1k61BzmCtugwrwNmYmb3R1y6nAFqlO8C3FEDWcXETSFLbxPejjucERJ6f2oA8ReVNMtYLAGALGKQMZDum4PC2YB3VINtZgLxPPdDvu2q6r9em66gNn1sVamO8ArBn25pBWgIJYZgBnQU0N5yCvqCz6MhYOtDfPQJNvq/tKBzhDyDLPzxTZ7zvqnO6DwFS6upvBBEEu+ubMtYBOuws0Xe0gQxmcqHB0AB066YXul78if7ulYZ+bvFYOT2H8jpdlw7Sk17ZntlDxGPVbTVy0Vw3aQyguzfA63gA4d0yVkOwtda/zRMtTfVDII/bBt0gaP13O5AFSiBVWwWdHyBnPHxVD5i6p3OVTDRrE2/podod6txPgbupdkU7b3Y8PWDaoLE2rmnk37wGGxxq401gZE40YWFhT39j5amhaiz1AAyXarzCgT5bACDIvKlrqLvSA6ELhCSLvt5z61S/zwHuqyowwydrtZCNFYA5+5DdIXGsAnhaGgpATVdAQDphvEK5o/trAPQJ6QNwmq7owdjfAQJYIEmtTvRAvgndgc0emPdEM5YLaNXfBgcVmtMsAGZ5A06cDRxIPhzwuenv9kBGZWW6poG4wq64wH8ArA0uQCOyif5utQNMoL7a6Hagz3bh6D0fA9t04ug6/+j5b8tY2VA7dja9LWO9QvfrBhz5tqfvMqJYDXB25ULnxYOuuK3bOvnrQ/3bRaTrtFLRvbkF/R3Lu/reWtGj72NFKtdwXTpIT3hF9VOL6q9FeauziXym21YjXoE66aRU56C5gBbXUiPfAMoPC6hbN4AxNjnTDbSXAK8SRCJOHQBWoUa9r4JYXFxVi/t6R3/3DFiVq5C+rYGgq99QQ7U1AWBpCniZcCJjbQcAlEDSV/N0a9XOgW8K+FeGKz0kdtf6wQRKig0A/zeP9dkuXD3cgypgWyq6hrzGbRm72tJ5jhrqKI9BgiWANH4KWoAzcIoPZ8AJtNQvLHPg3Mr1wEquEDEelG7MrJNoabCAjE91T614OlaenM62HrRXSt0Pq31oMgAdwvFKD57Dvj7fHJyDKmT1mhu1T+ZS+ZE00dQxJo2wADJSKZABxq4GdTcgKCG3aQWfCwq1qd1qR/8WJCo2K8Co+bremsdQ3gTnagW0C38C+KDJidrU1QaaNNq6hgYd0OQDuRA31/dWjvS9rfqAbcsgKHla14sL+NRBAPg+IPpeAGgw1SPOsseIMTOPqTsevy4dpCe8iknbiod0y2ot3SyzCAxBFdo2T4DNFWqx9QVoE8Hh0Y/h9C31lXf2AHTb0s9Vx/q5DNLawevUiLz+SDMC5zkw6R7qRnOA78dbqhHpX1Ujcg5dQKRGH64hWoSU+AWURm2gDmYPHK4+MJbfC/UGe6G+8wZIyZQzyEh5ICVTV9DnPpRLLIdsILATV0H+Iig0Q5A2NFuZZ4CVa+nz7gL4cmP6u5UqROA5ZGy7Gn3uucC1BLidXTjEzMyiVI14ZXEoY6mrCsc5BAKVoTqUQUvX1gCyu3PQgNsB5nbP9IDaBocmhiyQVYDJ/AIykUDRUIvUAUkBQB3sAcdORce68BxzyFJ5ppkIM10fGTRQpJHOXztRW5RtU/ZJP9d6o3oCkwVkLB9o8PKVsF2PoJuROvnyIwD/w1zlbf1cAVQWBp2yNaCyKB1dGwPAp56DwzqDc2UyA0Z1EFoPu7ov14NH13MGndZ0XTpIT3i1+g2r1F/brMMFZEoguq6NIPXbUyPinkD7/h6Q8kG3Sx1o+40OeMDz9ICKHjr1zfPUrT8fQiRX18+9daAGtxtpzakLm7nV0I27gvT8tK43fQ4yAONzNVQuaOhdzfQddWJ9H22IjlPg5/FyPUw6EWhFASFnywccC+ia9UIFZK6XaoS9JXSYASCz7kELPgBp70J5qL+rn9uBA3UN77KV6DpwAa8x7upaSwwAy4UeigegVbiB8rCZWXWgB1S+BjbiWA/k4kCdvajo6I/AeluDo7gF2L0cSlgLOHhycBiqMDYBbq+8CxkBcDZqoKZdaDXYDDquSBrIgIncgaxoDYSQx5jN0msbZJ6ypmaBqiBM24PEGnCQWssDVmpg8E+hIeNGTQOQ9L6+t1mszvjpK7ofMijt7T0Lpf28o3/rA+dRpoHZnVL3TA0oEcIhSJI4kMFsfFqGVpFCNzz//mP/vmzz/6Jco8bEooeIzKo1fYl7Cz0UZoDj6IFsx2xLjY2XA7cPMNrmVeAeaWg5KOyBLg9oZvmBlvFc6Na4MdBD2gOdtArgCHKIJnpg6LtwgI7hb0fQ9uoD3b37rL4jH1pNq1BRDEA8lPiNcsjGxJApmQDviweK4bU6ZGOg3XYKuozOQn8X1AysKDVfXUD7eDrTH7kGh1g30rExvMsInNgUsl45AEqaa91b6QbkL5b6juJrQAcAB6WZWWkaNQdVwGdU1UHNwYFeunqP9VCDnAwChg04NDWI/rvW0d81feYJdBEmTWgiAZuFh4pLcwh0APDHLhCTFtApkIKWXQbM8s067HVwOuM1CKEWOqcFAMvPTtS2FVAGdaDzbly/LWN10ME7J2qXVLOQ6RCoIsBpT4HzaPUJtfmjG1ARGem77MH+osaS0RQy4TEEysC5NampNE28VsfxYvboubcG/CZdlw7SE16O+7I5DzEabyC6Hl0AmyuVM6pqlJ6FSKTMAdwIbLM1AFUWEz1oi7lu+sgHodYEFKWhMyts6LI6AHLLfFcdvbqBvAeofk/gQCCDuwvRYgUO6Q7EkJmvBmiVQumxoYagB4DiB5FyKBFuwvE0G+D11FmbQ8dkFUo3oaPGphECOagHWC8Yy6ENudFRB70GOBbiw+l21AnLQE9trgkC7JgsQDC6SkrloWZOI8AvbYNU0GcuYtgmkwolXejC+kLNcROVyGEOIXuytj+RsQsAQW/B/QXAcp3YRMbiFXQlVijrCFklmNMVOUOmmYgNdVKGqnkZnAImL9NANAenyQN7MupMZOw0UtvROgaRYWC0HzfBJkBXYQzEqW4LeN8O9J6nQMrUgJLz+Qa6dhdKL9Cuq2N2BvhU50Ln2V+rfW+0OzI2awHn0YkG/L2KYv7qjy21Fcj/0HXpID3h1X/FtcpDRGbTPpRRZnpAUWq663dkbL2Gei+k/AvQ2ykD/b5mXTeQoz6TLaAN/Kyui7u3C5pjYzUsORjXYKQbPIo08noFyoc9cBL9mv7teAIA1FydsPYG/hbueQ+EM9v31QAFDTXgh82OjJ2UcFAm+o5aiWYXpk39Ww9EXkMo42VAAJmbGiA/hMVR1XLkUQ6iuwk0GEDrNMArbJUA+/cSyn11IMbrQIs6AJF9dCBAKw7v0MxfQfRSAcM7JuZ7cpCAghoyoAR4NvuUjPimTOZzCDYqUCKqrQFvGEMptAOUI8A2vQ3zOofM3Dm0fHvA0r5JFeQegN6eU+jBXQK3HL3ii0Ln78X7mn3evKx2cQ1BRLKEvUnSGyAKfpFqQN2HpeEuJ/obIFfTBgkrm0FprwqwhYuOjFVykNMBGaC4qmstvqKfqwTAyQQl8bCrvzsZQ4eoM3nk3+Ulk/YX5xqeLCyqvmZMqktieIVW83tqCJYgotoEdeaiCfggEO2bOFrkd0CxfTTTRbZbwq56nK7dzGIgwQthgy+buuCDRH9jBKRrHhxu5w7oco0h6wAUC2FTQbMNwLJEkJ2YAb5qDo7oIUT56SmULQlk2IZS3I4+W/8OMGRDx1XdU2fN9fSAqbmgPQdA1QgINN25Hvg74EyewtkengIYpVSgpXMN8Him6fQZ4I3IxYlgXVWg68nFbI+ZAcOzpQD+7FO0CoC+iUbmSQL7awsY7aHcF22g/Aj4lvxCHd4MaCVSmAeQXbPtBhy0EOhFBTjQE9DRqut+vbqr724JnEezhc796EyzrGOQ3nkx1cB2Co77CoDqZQNwZxmUCkFTzgUG7zZg41Iof8cgZJwfA3Ei+EdVgAX0gEy1BTxDtYbuuQFod44OAHu7AUwYBM8hOEiUkg6h3c1/LGD1y8sS2xfliiuhlQ+1Qq9LdUCaS3X15wVEGADiNTBUGTCjzgEoXL2l6fQHHd0sGZRq5pmmKfvApxH8EXCU7OgGbwz1kC4ggs/hgModALSCdEQOwPdNCM8LlP9W1Q2+gN1xsq3v4/pkImPn0M6+B3wz+Y5aqrr7oozde1WNzQBqqDsTrclT5LoCR6rs6oEFXH427uuzla66IEWkkf8MlMqvtZ/XH4G2/O5KD7s50Cn0MnBIoGvUXM0GGHAMmXEXmwF/k1FWCZwcAxJSgzJPONP5il+ErAPcyvm1joy15zoPmxl0dQEupNIAh/AAMs13db3NUn2fVwPA0LTU4R1BQ0YKOpN5pg5DDmzn/Qhwf6CAkJawpgs1Hn5bM9KWT2SoBKznbKTzHI7UfuZ1qEzA65isISsPcAloILZqRe37stS1fy3V39j1dY2Xjr639qs6f7M5OIlLyPzNOjKWEHHvPmS9HtMWWRFFOFyXDtITXosX+hZEr23+0FP8wygD9ei+evprIA0MQLwVuA9tCeWWsNBF4N7TjZZC9OmvIBvTgs3XUANUBz2qVxw9ECLIMOwk+nBV4KsZ1ToytgDjUK7UEgQbfd7F+uMyVgGQ8dVAjYgBV1VwVedgBoe5M/mkjB1XFM/jp3qYvzhUA3RnrGWB6wMobwInk90HSZwrOlfuqKN/u6Vj0an+bRio4RsBaWoVKBHGpyDwCor16waIZB5rGj8JgE7B1004XhPWyOx2W387AbbCxUyd716k+8uDbNgCgMLFTNdWowaZDXCGAhBCTiBz4MAJGkA3VAsi/TZkmh3A/S0d6ESbaCnZQLF9tFbvoA1+aNwHvBHMSw4YxOugp/bJsTrfoaMBzTAGnUkg8w1LkB+J4X2c6P2Nm9B40IE59aBjbaJ7KYLS+XZFz4v7kTryw5E+2+4Y2LoBozvL9ftc4q+qgMZioft6Gwg+l4+1YHurvwQlth/7sR+zD37wg4+MPfvss/b88xBR/un1L/7Fv7APf/jDdvfuXdva2rJv+ZZvseeee84qf/oin3vuOfv5n/95e/75561ardo73/lO+8mf/El79tlnP/cdm83G/tE/+kf2r//1v7Y4ju2bvumb7Kd+6qdsZ0cn+8+6cn9h7kOkYOumIkm3Il0U06m+xF14Z0uIMCpwAJQ7UJYhcjxgw/agKtDI9KBYgjHcrMAqQfkggU4v21YH6b9CR+/NQLNPW/f1MFkDaWBcaO1+NgCZjWPtNPKBj6hyX8eKpjrA7jGQefbUMMdVILds6uG5AmfND4G2v6sR34tLKL809QDsAMZsA/CZ5k3gr4FOqHvQPRMdqBH2HODrmoFTNwX6g0D32/KItAX1dxdj/dy5o3O/gO4ZM7PWpCNjsw1xbOm8nkOnoiV6kOVtkLZoqzOU9KEkCY7U6ETXEQmX9q6CYwHR+isQEB74uqbDqT7HiwWxqgM1yRyMggc4s5Z+Xz7Ud5xlQC7og17h76m9q4a6BuczfR87OQS2YAOrwJ1yAevFr+pebzsKUVg3NYA7vA9M6a7ahPVa2+MvfL3nlyu6XrYB4H1aBf09wM9VQdjbIZLjpgaObSAlNqCUiB8roSb5XxIepDe96U3267/+65/7tw+kgJ+9PvKRj9gP/dAP2U//9E/bO9/5TnvxxRftve99rzmOYx/60IfMzOw3fuM37H3ve5999Vd/tWVZZj/8wz9s73rXu+xTn/qU1eufKWv9w3/4D+0//If/YP/23/5ba7fb9r3f+732d/7O37H/+l//65/7/hfzAwseAtVehxbjOQDCxlADngNjtB/BQol1IYepGgICQW/akE6/p4Z0Gqj3XwMQrwuAcReMobWB+RrOiLeu1FBV62r8Z03943NgVc3awPAMuOOXZ9rtUtvSw+4CDJoPBvwZV+cqgQN+G4wXSResgHW8nuuc1ivqsEbABZUmaoQn8H5Xb9L3MU9AZypX4xW5utbKl3R/kO5Xva4HZQmAl/U5CMaCY1uHMzYF418AWc0wIfC02WatWb0KHLQldH+2oclg3VWsTdqGztECGJnP9N1la9XWmvT1vXcX+swna+hEy/VAbvhQYlvoO7kFTmYNtCdX0JARQmatAExOAWRG/oKcEh0bghBqv6t2LAAgfQ+6tZq1Q/2+uX5fBhm97m5HxhpddSLGhT7Hzh0NVJy2zulJpvb9/OQFGRtUNWnwV4DF3IXqwtpXRwWa+6xB3aAZCAUn+rypP5Gx/Eyz6DuDR9/bsvxLwqTt+77t7gI3OFy//du/bV/7tV9r3/qt32pmZoeHh/ae97zHfu/3fu9zn/m1X/u1R/7mZ3/2Z217e9s+9rGP2Td8wzfYdDq1f/Wv/pV95CMfsb/6V/+qmZn9zM/8jL3hDW+w3/3d37Wv+Zqv+XPd/+vfFjyiMxYlb5XP7G40yvLtzTKWxpoydftQ304UzOnCYXk01QW6XIFMQQCCjhs1kIuRev8HQLQ6gwi3da4bN4j0d7vQAWMjjRIqhRrrA0cP6dsn+rvQRW83b+ihfzYFTA5wy6x8PRDu1/RBsqWOvexoNLYBzNpgDGl34ApxQOIgeINucxdOk2dqkK040fV3ker7vQ5Yj3kF+KGgyyaFQ+JerF1ZDQ8wIRtdzw5EkM4DWM9AtOkVaovShq4NM7MqYOhOgdRwfaElouUpYCcKyCbmCqa3CPATVaDv2NaDpwNioUMAxG9N1bmah7r/N5GuVQ8O5KegbXsHGO1PQUfvFNreK0CdsgIiLwdYxyvgXG0NgPYCxFuvQhl6XgFyUFgb7aZmCIfbAHg+AZLZI93D/UDHRk/pvnanWtbuhUDtstE990pP329jB+YKCD6jM/0cUPjZxtc5iOaKn90t9Y8rawDrg1j1+KXHMEhr8NTg+pI7SC+99JLt7+9bpVKxd7zjHfbcc8/ZtWta7jAze+c732k/93M/Z7//+79vb3vb2+zWrVv2K7/yK/bt3/7tn/f7p3+qYdPrfcaof+xjH7M0Te2v/bW/9rnPvP71r7dr167Z7/zO7/y5HaQbB2dWfYglOj+BLqyWGkgXQIsxdS8Bk24Kh3Q21L+9tj3RzwFHSTRVZ+PoKY0qnwXHp4AFWpuqQYtqmtnIl2owVlAOKkFfLMmBHbahWaBGAp13QHM7cfWeC3AEYgDhJ55qcl0N1fn7dEPLQauZ3t85ZFQSkELxgQrKAa2t6m8DQd0BML5DW/6iohmuNrBrr1JltD0NNPp0iYYA+JJS0KPKgem7cqrvDTqJrbHSZ3uQ694Ku5Bt22IepD3g5wmhFLLZPCtjnYqu6WtAehnA2JTSYQEceMA8XvE1E9GEsRjU2W+A83KygqweRP8tcJZto3ieXqG2srXRY+oEcAF7DcBXQpYlAhHf2Nc12JuqQzhTX8PeCOXlY1CyH0KHWTWBBoCeZk+jDjiTd/Sdr6Frr78zkbH8JXW8G4Atug56anaq79wHVdtaDzLcwK6/SjQjOovUzs6mkEHy1BFdAKRl8NiZlEHXN11fUgfp7W9/u/3sz/6sPfvss3Z8fGwf/OAH7eu//uvtE5/4hDWhlvqt3/qtNhwO7eu+7uusLEvLssy+53u+x374h38Yv78oCvsH/+Af2Nd+7dfam9/8mYzNycmJhWFonU7nkc/u7OzYyQlHimZmcRxb/BCYdDb7zAHjBqfmhq9N/tYOtJOk4OWONA0YruCQbkG56hZkVEJogQSCugDSnvGbdBk8Y3CYQ1t0DJo2FTA2oQf6UcDx4oK9qOTqqERtnef9BhzIoBE2g2VfBXzFSU0NZDYGyn/okKK3cSPp6GAXavfAFXQG6eViR9frrKefO4G/rcz1d9euOk0t0Adza5oRXezq5xoQLtahrDIGhux2pu+3Am3SeV/fWwHgWmeqa3Lb0YOo39KDLZ2wWO0ZZK+yc9A6hNJ5CM7yvKmOtgvt8T3okKxDVnSe6Ny4r+gczmNoyx8ATgfKZJVcv6/S+mMZKxO155UC5g+654Zt/Y1dYKqeAdC63VR7twYuqL1U30cEvfDzKgRmkLmamGaG94DfbBHpvRQ9mGdwpA46eoasABdXH4JMEWhtnnjaAbtagaAzAMYhAWd5oPd8CzTbvFyD+xpklazQHyGqDpLOWTz2p2sPHHa4vqQO0t/8m3/zc//7rW99q7397W+369ev27/5N//GvvM7v1M+/9GPftT+6T/9p/ZTP/VT9va3v91efvll+/7v/3778R//cfvABz4gn3/f+95nn/jEJ+y3fuu3nvhen3vuOQGUm5nt3p5ZrfbaNKaeHloF6DM5gEGwlkZe0Qkgt0GJeTUHvgqQtlh09fv6pq2XCZQKorUeKFs9Nf5tT0sFC+B4MUfvr+VBJxWox+fQxVIAWVl9owayDd1LfglA1RoYcMhmXeRq5HahLbfc6PMeL6GDA+rjeQZ0BRMtBx1k0OJaQKcRPEd9B6gEQHtqDQatMwfw/w5ItThQBpkB3sXTZ3MbIM8B7OnZAx1LMs2SVNrqAJ9A5z9ATD5zPxWdr56j970u1KG8iMHxTHW+AuDiOomgjHIBZSNwvn3IYhgAXTuFrrcjcDYuYqBZyNWObVZaxjuG91k50wxIA/R9WtBkABUYG0+Bly5WTFO01mdrAft/CzTvEhf0FCv6bIs2UCwAGW12DB2OLbUTDvB91Rb6t5GvYw96gP8CTF11oU7iFPiIXgR7Z4B97C0UFlBCl/IcsJ5pDISm0AV4ADygjxNue6weJNeXvMT28NXpdOyZZ56xl1/WUomZ2Qc+8AH79m//dvuu7/ouMzN7y1veYsvl0r77u7/bfuRHfsTch4juvvd7v9d++Zd/2X7zN3/Trlx5bcPu7u5akiQ2mUweySKdnp7+D7FQ//gf/2P7gR/4gc/9ezab2dWrV20+P7H8ocjWBYDiZg4p7J46EQ6wSNdGagydFJTAAbTsQBfBwVAP87iuhvQqiNB6sYJIF3MilNTva0CLsAvsy0EAoodA8pJV9W8boQIoy6Ye5tRy7ACxY0Ylj0DfbzcF8DooWWdbimkooFxVyXVbbh2ocT0Zq1O3HOv8veX1avzHS7AQR5D9hNQEsVJ7JMsy0TlIIIuWNKl1Wu9vHev7cCPQPgNcXL2r91yeATYLJAjmi4l+oZm54Jz5NWiVdtWujGCd14CHxgUcoa10z6WQQV486MhYDtqO5msmog5OcAnlmwpkbU6hYSSDbFELhNcaHZ2DAuQ4nocydBO04ooZBJNAIJQ5uj5G0LjhwTuKXLWVnSlobU50/lZQil/E6pQ4Q52r+bYGnRUgEf4TuL/FbR1br9TZiCP9nA92sbHU59iPdP5yoF3wADNUjPWc6kH34SrSeT5bqj2eP4ap3fxlwSA9fC0WC3vllVc+L6ZotVo94gSZmXl/utjLsvzcf3/f932f/cIv/IJ99KMftRs3Hj2UvuqrvsqCILD//J//s33zN3+zmZm98MILdvfuXXvHO97xee8tiiKLoC6a3A/Meygar4C+0HVoSU2mkG6N1bimDf2+YAIEi46miBs1zfiMIXOwB6n4UzBAFTD087UaoMVcn20wB0bmAThrczVU6UgNS9D+ChkroA7udvR3axFkriDiq9V0M9dBOy0OJjK2SSBbdKrvKOjowdGN9PvaILrpfLkatNNz3fgvd2TIQuBDbI4BTzKHyLAC5dcuSHQE0KkVAO7H0xvMhhpAtBoa+edwaIc1fUcTIN/rtwGvtdIDYXwVsr1mFhJoNNRAIAVyzC1X52u10oMx3dd3nEEmIr+t5YxKDbKTHeDnKhUjFQzVFvmpBnoZALedUhdX5MD6NbVPCyhOewFIFwEL9yvuRMZuAJtzFIM8DTjk80CxsA5gchzA1Z1NNThdAA8S+FtWDfVvz0B37RQCzPVE52VpUEkAOgug67PmHLqjYV21AZTuF9DhfA6ZtQAwfpAx8wAnGmQdGZu29V3WO48Gti5gEun6kjpIP/iDP2jvfve77fr163Z0dGQ/+qM/ap7n2Xve8x4zM/uO7/gOOzg4sOeee87MzN797nfbhz70IfuKr/iKz5XYPvCBD9i73/3uzzlK73vf++wjH/mI/eIv/qI1m83P4Yra7bZVq1Vrt9v2nd/5nfYDP/AD1uv1rNVq2fd93/fZO97xjj83QNvMLD5tmxs+VGIDkq8xeLkN4G/JHf3bpKlGaV6AuJ8HrLSkzznU350CedcUyONOIm0D3YcId+6p03Q/UMfHgXb2+kIPneZXAtM3YBA64MCWKZDlAVD97rYaue1jPXxjOCtDcGIdcA6cvCNjOy/qb/g39MA6TdSg+TNdB26mRvPOH+k8d8FBGrxOjVJ5dSJjxRo6Az1NnSqwaWUAAFUPSURBVPt0mIAml4EmVxt0xFYVNcIhfF8FnNi9UA39CIyk4+pebcUsS+CmWpr2Jlr2iLcBdwVlnnZXxzZLkAsBoeE6kJUW27oWXOAKuyj0+c5aUHIq4YBqAgUCBAduomtmA0zkOUhbbKAz8+oCiCxBHmlWTmQMhAhsCnu4AMcnAPLdCmjtdaEUV+6qc3A/0vk7+mO9F0hSWxADf1CmWWovAZknR9e5X4I6gQc2ELr2FnV9jhqQA2d0Jq31b8OartPjKXT8NScyFi0A8/vY/tj8ZXCQ7t+/b+95z3vs4uLCBoOBfd3XfZ397u/+rg0Gn8mR371795GM0fvf/35zHMfe//7324MHD2wwGNi73/1u+4mf+InPfebDH/6wmZl94zd+4yO/9TM/8zP23ve+18zM/vk//+fmuq598zd/8yNEkX+R6+64blHwmuFwtwAfABiG5lxb0iuvA/mHEzUi1b5+bgGdaH3oGIgBhFoBLZzmRiPu2uJQfxcMQT0A3SBgzQ6h08PfUaO0ARxWCO27rwK1RQMyZlPogPFT3Qr1PWJfVmMznOpclWvgUGmpITjrAs/Vmaar+1XFB7gBYCTuqNMUAWFoBZTPK2fQ8QPz3Ip1ovv+G2UsMSDaA36dJYDDF8AXWNzTDEtQqBHevqHfNzvpyFgPyj7jmjop/ucBLFQ8zTCk1wCTA2Z2BZnX+UJxSUFD14zzijoRMyAVSyCCr9b0wNuC4KV0gNXb1fJyBp2oLpAkelvQWLLUvVkCeDiogEcD5JZhCszNPZCdAbtYB864HDIvS2ge8MC5upXoHpm8DNxDNbDRTSCKdIC1HegAoqqSLNfBtqVAaLo1U1vkbUPTQVXnNAKGfMdVB641UyenrOvZFaYaAO9A9+EurKF5FbByjz1vBtg5upyyBLfx8vozr9lsZu12277jf32nhQ9lkIjziszrNhBFrjsgcDhV49Vdqvdf9vRQyNaaM70GmkOrgS4oH4Rzl1MgRHPBiEBLb/1QnbDKWA19bdqRsUUDcBNAAJdDS3rR1O8LgOn3qq8cIImjL+kIiNMM6tkxGFe3qkZ9PdOMXhtKOssHgK9a6js6gVLr7U+qM+RCZ9vBDXWGOg0QHgYckQclwH4VIlzg11pWQLkbyAub8D4aIK0wi3Uf7YLe4NrVZ/NX+reNLhvTApobxrv6zN5S311WQkY11vsJe2qel4DJqwHHVuHAWq2CyDPpH7b10NqBrBI1WiRQci4rAOI/1b+dQYa2MYYsGmideY7aiT8E5++wBAZ6ADc7wC03ravjcwv4NvIXIDvW1M8tSQ7qRO2s31DnxTXdD0uQ3lkB3CZNNTh4fa6OT2sbhGRd1encygGOsKV7fQwdifYyYG9B324HtABXG10w41Sf43E3Z7OK7Yf+3v9m0+nUWi3SX/zM9b8rDNJfxivqNS18qJulAinJ6JxUyCGjAkKZAchOrGNK/eqCr2VqXCfQ0bSAklMObf4DwIqcQ4trFOj9+Q80mgWZNCS3y0twzKCDY8fTA+tBqkbuILwtY+VMUxYxYFT6sf5ttFGHNbmgjhW96gd66Iwe70k1s4YDyuJAC9GuQmboTWqsR2N1qJ+P9V6oINaBUkuvAsDcjRKfnoO2U3ihe2YJ7eMFdBquM2DrztVJ+RTo720B6/36aSDkQ9IGMy/SqPn0Qj/bfEr3V3mkjkq1pu8uAm6qMtA5HAaaYRysNQvUhwxNvK17KQjA7gyB4w32etgEdniQVgpOdSwG4dy1o2urDKF8CzxZu4BVmkBJMYKgrujp757egzR1qTvbh4zeyoBiZaJrurMFygYA0o5NcWJhpmdICJi1dgmKAM2rMuZBM8HVADKEQF6aAiNvd6XfF0AQG4Ke30vQUQe6tNaCDn7nMaD6uvjC8kKXDtITXp1saNFDKcz2HHSSQmhdz3WjBVUFQYdHalzXACjuzSAVD0r2lY7eX7bRFdU1xcGsK2DkgCukVcChBXwuSzDCKal+Q7fLyVIPk6ylx3my1g15C0AIBeChQhDdzfb0HZVAGJg3NCoKMqjTQ/YpHWoqfpkCqJLKggAc3oqUFG4FPEhnoFpfL3TMr75dxiYbfecbyFaWQOI2AwfYhf2xgY6p84rOaQcUyJ1CjfUq1NLowUqdtcIFkiEzSwHkt1+Fd/IAQvhtKAOckkA0gKrBiXgKJGY2u7ofxtBEEoAYbHyh2acWtHIXAB5OpwA2jzXgOiX4UuOmjPXPobwMWmJzYOFvLbXTq73RH75PSeqNdmGtAN+Wxvp9LqyjTQxi3wHwAr2i66AN0I0EgsSKAXBbvw7Pn7MEAi5HW0JT4G5aAEZq5dyWsb4P8je7+n2tMagsxB/T+4Mz6X5NbXTnsSykB/gyui4dpCe8zn2zh3gibVJoKzzACCzJ1FC5M1205dO6eDrA7TFcK9dK4cBmBoPWg3T6aK7RyYYyQy2950UJgoRATx9R3b+t3/fpY53A9jW9l9Oh/u42RG3HQH5W5MBLE6jVjE4hizHXzTyNdE6dE7BUG2BeroMeVaQGdzIEqzTRe06gVX/tQkQ604PoDghxvhirw3UT1njkHcpYUdfnyLfUWfOAJyxsAvs8cCidJ8A7lOjB0Qe5mk+kanCbPotY9xzdmzvQqUSNG+17GvpGUH6oA1t6PwDNxrZG/8mFHm5BW53CqNKRsQJEtz1w0uO2YinTJeB+ZpouvnYF8AhARHVcA5A8JKkqqdqTALTdxsVExgawfkPglrs1BCjDSrO7xW0SnAVSUxAez4Fl3T8C2+vdlrEjAM1HlAeGSoffBkmcVO9vDRxFHmRkOqBBGK70+zrAkbUAUkjgPbYHANYPY3DqHvt3FkPQAtelg/SEV/XsvkUPqaqvr0HqLlKv2eYAYAXC3nylRmmZ64JKICJI19DlVOgGmgNBWH0LMEOpMq0mp3pYkhbO1NPN98BR5yA910kIIXOQf0xr1EkNyBQTnas1yKNsEo0W/T5sj3vqcNUABB3SO9rRew5yfd5r8H2dhf7tflfndNwCR8/R52gB2+zNHdURnJgeTguQfgiATbEDQdoITra6r5mJdamfi4ATzFuC89L/ar2/GOQMMnBmXKAm8EAPzcwmbWh7dzSqrwItx7gJcwhhuAcq6cuVHr4dYBk/6ICMzVods2GsTn9lrmsrakL2dKT3HACnWLCl73gO3LHxEjBSYw3WVqAKH4RaJt9Aif0MuH1qL4HIc6TPtrdR+/78StflCBxtD6pzg5E6L3dBu69yXR3gLNR7bkIdfx3pXNUKLb9mvjK5j8GRuGH6Lt058BWAkzOEcqQH58AGuo/rwJE1BymZFgQBvcdKnh68M7ouHaQnvIoz1/KHeBxmQzUORz3Q0YIoqwbU8cfQwZWd6996gAvJTBdU2NLNEoIisp1Bvnrz6zJE6duUWuHB+4cmNtsB2p3ZCnAhPTKQes9O2ZGxWhu4qvqggwdq9Lal5a9iDZicC3VU/I46Zi8Bs/SwMpGxDUR8f7zU391OXpKx+8BztddWR3nZATV5cF5aQz2gC4hSXwVh1CWslyupGquap4fT7RvqkDRA2f6a6eeG0NW7BselmkATQwiL0swKYL5+AMSfXlOdkquA8TtY6uG76kMXJtAneCsgOvTVsaiFgAV0JjLmgr7YFDKl+QmU00GDq7yr9+KB2Op9KLe2NnqYGziy93P9XBzqOqoBOPwuOPO1FvBNgVMdB7oP6w1oCgB7dwzOZEMx0NYAcP0KOMp6ULLfhLo3I2BPX5/pOvXr+rnbUMJubenvdqARobEEImXQAkx9PbscX23vLjDXV4i49zGZF2pSoevSQXrC6z9/em3eQyrjDujPJM4fyVgOuBrPB90wSCW31dG361Dq2kKyR/3bc0BLV6AdD+TUrL0HzgukebtNAHNDdLLMNSNQvaIGaAqt5ucAg663gaws1WzMq/fUOWgXuknrAB62mhrrBfQuxkCS5szVYDzf15fe39E5vQmGeXGujlkdoqw00zJUa6jlktm23ssYDs8+kMdVUlD2bOn3OfC8cQA4sQVEn7W3yNgprINkpO838gADB3I1zQaUjMxsAJH0Atb+YKjrowK8QC9BCftgBOSCLXCa7OP6uWNdg5NIn6+ALOZsR9/J4lj/9myuz7E1AbB5TQ/fZQBdjrH+7cr0EHwAQrwNYKAnyS0XshPgR1kKWKVoCY6tqzZrDvtmd6Nz4HnqvPjgPHdHOs9zCCIiyGpWwTFrBNDBeUO/b1ODBp6llrC6c6gawNzHke4ZHzC1VwFnN4bM37StjSBuFbp/No+OxSDnRNelg/SE18vNyaO1UUg6GGy+BugkpbCgQnCQ5i/r4TF8WhcelGItUyfcSoqKavogLijZT0GS4B488Ao6ENogEHsIqfPNVBczNPJRRtdmMzWGg7oelrtwSCwg9Ttvd2SsNtVMQuJCiaKEwwkEe6+Vyq+zda6GNPM1qsxaOqe7cEoMATBeQpZ8OwXdNeAFioCo8CKCyBqymiAzZ0sgZ9xdAPfNsTp6LhCGdoBh+Hyhh0SeAfYG+GbMzM6bOteD+3o/tYq+uwsg1vPgHrMaiBknE/3d9E0yNl2D/Ehb/7YHgs52V+dmfqxrtV6Hbq2GBiA5EJ1u7oCOXgLcVEBMWDjqtC6qHRmrhNrZOmvpuqxmsBbgN+p9dYYqE82UNAnsDxQBRQhlXggS19CC73iALK/qbzSBJXwDPGj1GgQlWtQwD3B/1odSFzh6Ocg8tcfq/F34Wi4txvou/RjwWtB12niMk8ndXDpIX5zLtUcdJHBATLUbbUGdw9CyGENJYr4FJbuhjjWIr2YLQLIppFshA5Lu6HLpXqhzEEKqqXT0FBwDxbwD7dgeUBNknm6gHDq4DiAqzxLomoINE7i6ce8CqWGto4YlAhkAF3ik3NuApUrVeF1Al9JgoQdMtQkyGyhQDGSZS+gWbKqX7Xa1BhCc6/0NS2Ver/aA+wbodTMA19YqOlezBMovEPn7HpBHjoHrK9HDbkH1YTPbb+ie2wAv2AsD3UtdOKBWmX5uSGnbOgCFwe6cAzi8ek8NzwwEp91A1354FYgdL4AodqM25tQA2wUZuD5wmdUNnsPVeckBt3bUVkxYsdTPgeKPOZCNmQOEIm9owNWFLOtsDPhFF9ZqRcuR5JNkIAbbBT6nOYhpGwytQUfUruj8tSFgnW10AfbBpt6Hrtj1oe7h/fhPZOwcMtIhrPtbdd2DvfDRTNMa5L/ounSQnvSqVMz8h0psmvG3cgM5zgp4sKAhZQC+tNua3TlaQKdMqpFIDHiPPFJD310B/Tt0hKUgFhp4GnZs9aAT7VQ9wuJYD+TONTB84Ph40BV3TrprC41sZtD6nwDAKh+AI3WmB5YHJImVLqRorug9v/oA2tQBY/LCWg0pEQFeAVbqFVAikPp79LyujYumgpZDaEevJgDwfqCWPmuA9QfJieOVzvMtAOYfOi/KWFTV+Yub+n1FAiRzAEQ2Mzt5AEKZTT0YX+fomskyahfX75vBSQZURjYDVfigrt1Vmw60pFc161Up9DCaKfbVhkDe12jrXAcQHKQgHZOC0Gi9qnN6tFIakvpcMwyHAPxZ90AOZjSRsfBlvb9kRx2zod/R74OmmWtQ+VmZZppXgMkJgQ7AX+keOQPHOwI6kPz0UG/mXAOaiq9Z2wfg1DULXc9rIHGtutC5rP6lnXqQfXagm3GlZ8gY6BTKxwL+DTQS0XXpID3h5dVa5jyUCcmW4PiAyJ45gGT2Aevgwys60Jp81APQInBnHPp66M+gFjdP1aDt9sFBghRxfKyLdj2GEhbgeeIbkDLTPWo1iByaYUfGhqAsPoIW8kYHWKkHIEYK0gCZAVYhBCxarAdyZ6nvsg8HTHKgc3VjranJSqQZgrwLHSEX+i5fnUxkrITl1x3pgZokhzoGDNQJ4M5q99SJrYKu1rEDWbS5fq4BrNkFlEFapTryRzMNKjKQ3TAzq0KGZg/W1t2l3ncbMEgtEMgLgXiyB/Iq6Rrwhuf6jldt4KvqAl4OqESi7Lbe3xZ0ts11r/uABQSdW3sA4GvbAl04Vw/z+RSAvSlI78TqOC43Hf1dcBwzwENdK0AcFYDRSxChHl6A5EcMgthNKGVCYJtDidKpqBNWgpjuJ0HZwAMC4n0XNNYS7bKbzLRNcTQCNnZIIARAPLvIwKGGQGOnp+98VD66B+OEg57Hr0sH6Qmv3D37TJntTy//DfqZCBbeMgHiDeAKqhNfQ6bGsFrXRXETShLTWA1zAESWb4Auh4s6GNwzqNODgnYOPa4+4CHWr8IhkUBU9Ho98GYltAMPdaO1OyDsCe2nrwdSuClIagR9cAi7OjaZqyO6uqqbfh8kRBbnGlXeL3ROd+o6VyVkF0+ratC6IYCq10BACKjPArilskwj9dkI9AbrgBPb0jkAai6r1PVQfDHUdfo6ACf70GnoZWrU1zFrsRWFOi+3Qt2bM8hy7QJHzE3QhIhWahNy4KaqQEZ61lYPpISOvEqga8F8KO1VVW+vVarDVQcNruSevs+iorZou67rw5noMbWZQ1YESt1pqWmvGfAbeTkEjnVNvTYGoKs31AhumulYAUFn1lR7dwLZDR+ki0DhxAooR07WYFMn+rwNYNcPmmpPZi40RgAGKXtaz7in/1CGbA0ElZsdtScXQInQBLjEx2fA0D5/9P5SUv+F69JBesJrb6tq7kNK4QnJvVd0Jfeg9T+E1wHC6dYG9dvSAXJGwJmsIBorQS5kEmu6Ois+LWM1qGXnVWDrhtSvD2SZ4RWQ94B68eoBkDiW+rt+DYCCkO6PutDt5gNFPziOCWBovGN14PYS7bioFh0ZszmMeZrFcEB4dAadPNlAHWU31uj4CLps8m19tjqwZgcGgqdt/b7GnjqERCK4doCbB9j8VqW+375PTp0eEquxrrUYpCnPahMZMzPzM3UiwMW0gx1YM6egJQhBxLQD3F7AZbRDIONSxzbQ2RoFmkmLwfGsX+j9rUOd13PAnuxW9XkbUPoZgf5ZAYzlpa+ZnGSjNjWFJgPPdE4dwBsmuc7zOeCrykSfIyyALuKqPtsOgFbvgqRO80jfZb2lYysISrpARxH31GHdgPB4AI78GnBOSaS/Yf9Ff+O0AaTEPd0f1bu6ToHX1e6CQsPFRN/l6WPZ+wJkuOi6dJCe8KoO1/ZwudTz9eAudc1arA6yRaA/E0MpCdQarDaF32114HO6wattMA7QZl2fqGMRQiSX7kE7+0wN2nQFKvNAk+D6alzXHT2KiMOm09N7uQPU5t49jTRdEGoNuoBBALbZog+dFJCxWB0DT0tD56VX1XLa/YY6TfeGmiHYA0MPeo7WcHWutqFXdwlllTmob/trBX3GIMgabXT+ylTXpOepwW3noO0GJZ7zhjp1TTic8oau57NRR8bMzOzBRIamkEFePdD5D3wilNW11YKyewqg2xcgQ3YVmPTbIANUnai8x9AHxsFdEPWEEnYDGhmWc11Hk4bOQQFOut8AGpJA9+uVOUjCAEHodAsA45A9aUAG+eJU78WtA64OMj7hSO3nERBZVqE0fQSyUTEQiw42IK1UVcfRBTFyH0qFM2jW8YFmYjYEElzg66pEmgb2AfYxBcbL2Z2OjN13IUtKGp+P7YXPOEh/diebUz4uc3t5fUHXbDazdrtt1/5vZg/r4A0ByBjDvj0E7HUOJQRKBFY60E0G2RPCngRA218dQeu1ByRpu3owjhK96fwusHBD2SMER2qd6Of8mTpNOWwC4J0zOGeROqEBmLC0BsrsEwUob3ahjRZwMDZSxyeCcmkL1BY3kKVaEW1uRct4+3M1wiugP8g6+rwRqI0X0LIddXXh++fgGDR1roKpHtrnvjqxQaLrdAUdjt0QOrVCOHihk/SFIz0U7230kDUzg4qzEY601tL7bgBDeQk4omYInFPQ8zEBSoUEyul1YDyvZ9B+DuSnS8IHRZrBc2Fi6lM4LEGqJazre6pWIDvpapYlmGkQUQB4/Ry6xIoYJHqABsLP9F36E52/8Zb+7hjKX3UoZS7AQBVdfZebhfKWbSDYyEP9XNPV+7sJdApeExw46AI0eDbLAGRW1YDmfKLBVX+uJ98n2rpnkgUQaAJI23kM11VmpS3/S2rT6dRaLbDVn/27SwfpL3Z9zkH6G49isIlHEAIRi0e6ULq7kJ04BnI8YDU73AdRy1w3QXIG5aVGR8YuoKzV9iYyVkBmlXQAo5reXxrqByPAy6zb+rktKCXVJjqnF7BJqwD/8gH/lY2g9HNN87w1Uoa+C8DXSJ/jShdEWWHua7GujVOYq+ZGHeXZnpZL+5CtsHtwIHT1BTtDfbbTFUR8QNkAEmSW1XTuvRk4YeB8AHEwMhbD1JsDrw38LUs+T8NLDxwkSBwYVDMNEiUGyTADqiWDKpkBWbeBsLs1oERMzMPLRD+XtyBLDZxTjYW+qCEUK5Zzfe9BC7h4IDvmQoekD/vVc4BMEToP5kAe6XX1b1vQrVWBzGsFKBucrr7MxFNjNIwB09hRJ31rAU4scJSlcAD5MLY3UNvmFaDJOQHVhlz/ttpX58WF4Mq70CzpECR26guw70tYay39XL3z6JyWaWnL//fi0kH6n3V91kHqvmvH3Ie62EYFRNLA+gp73mqQFZkCSr9V00O114NFkWvkVQTQ4q5NHVbtg4FMQRdqBl1dILbYG6pRegDMvMA7aR4ELOBzAve3Gdh5rCtPSWwVOgNbkNPLTvSmkxwkGODgDnsakbaAOsEHcsBEX6VdEIEmZU+A52YDzgE0TBk0thh0JhsQVRtwuFkML8SF7Df97iMdEp+9AEgPiQQDlQeDhkRzwTEzMwN1HyvhdnBhQlYZBOoNaJWMKFyasEd8CNag0ctq0CXqgPNH8p6UtV3D7zq09vOOjGWgJlABAeBZqS+lDrxA4VKzzylI77gd/b6sBGD0BpymDjiY4LC6wNPkRjAGDNk1wGbFlK6ETKILnvK5q5spgo7kN+xpV+dWqn/7ey+oxNEKZEX8Up+tCnbC1zjPCtivG6Ce8TKdl/FjsV+ZmW1+y/5MB+kSg/SE13peN+chS7QPYeVyBHwwpt51HwzfELTYzsHIXTyvY4WpcaDzBLL9ZhekxQanJZVx7+gmOL4K5pUOPPo+yujSGFw+fB+UxvF3N8CDlEHGLKNTn9geqPwSq5dzD14bsOzbuqNj5DlGUGdcUfYEHC6SKaDGgewTMEYZFvhbF36DHDMkYR2AMwTrihK7OYWG0NFPGRszw7kGwXYrYM81gZsqm+gYDBngsS2E5hAfhK5BtxTBrwB5sRpVTJSOyKAp0SbAtB43JjLmLaDDDNrjq0vgLZsCfxiAgn1YhERn4c/hgSsQcYEtcmASCsDzFKAv1nUVz1NAgHRR6hx0IIrwHWjVh1b4BUj5DF8E7bQKScmoR1Pt6twHc52X41TnOb4NUAswoDVIK09hXz9OMF7mBoJEel06SE94bW5smT3keQ9dPWWawIzshIoVcWDxBFfAupaKBYrhbw24HpyOntxlhbQewNJTPQPo6e3N4B1ACcaqagjMB/Q6HYzk6JHtokMQDmQDx4fGQKzdQPEDfb+4ptsNCIGtgJLTGlKOgLO0DdxzDGUVI/oIB7ooH4D11+XHTjZlkCBzUpAjBUsDx8jKgVWDLm5eB7B0QanBzMxIzmlNnwUHi5o0aG7aFWhnj0GiAw5zkNuySFVsUB4pXWpKcAkNFCGAc91d3RC1jXqTRU0P0MJXm1VUoaMJFlwA5KLLhWb0+9aRMQfyYwlk0R2QJEpX+htdrWpb0NJ7PiVxbkcNXhVEtw9AIzAt9KWnQ/X6T++r4QkgO76B9XwXsjbLM2V7BL/MgKrOYsrkVsCgkEwW2DuyqY8nDaFxG6/LEttf8Ppsic3ee8MsfO2NdADkuYYX5m5pWi+60EU2WcCquCBlRQgDd+FUoIUBhsWASsDWcBqRkBZxTFBXDGWBZpAey2EOQGfOqapxKFPN7kBzD26+CkwL4P/IL7M18KVsA5jlLNeajgM6NOXTUCMi5wAOXoBhIEkfksvSGIVV1E1AaRvKEFJGbwJjZEghE4Pril4Syf2Q4wJlt887To47OGKgpGIgiWgWUL0K/jjURR1m6rx0A+jcg5JJx9MaUQZkqhuQYYnWgCOCiVkDv1kVqCFsrvc8hrkCAn/CBFsA7yOHNUNmtgrvNwXR6Ax0HLtwLy2oZQaAzZpANjsGp44OfiCHtwKc8SXtVwj+ggU47fDeCjIeExImhd/9AkviLiyXCAK49LFXVOZm+R9fltj+p1+NZWZO8tpm8IGo7+pSLenyQkF3Q+j0sm044Pf1+3pLXYwXwGFhE1i0WOqCOk9KLLdgMVbwt5ShoVrBNuwCTz/nrnReChDnDGADVUjfbheyJ9DVYZn+xhp8usEcsAUH+n6b+UTGdgGbkaRQlAcA+nJLx/xYv898/V0gbTaAnfF6AbkAdErglaOjQo4ZfR/dHzku5ExS1osyLJ+HKNIWoApPnfBUlq3qWnDW0NkGrdywHSyHzyU9BdieAjLdB/b/FPA88anudR9MQsPT5xjBOvfmunGGZ+oMtcj5hrA+hXfswlwtQJ4iBdJKh8wYrMv+VZ2/OpixpApqApn+yBHw+OAegWy2LUHfEmgI2lDyNGj+sYF+LgWxX7QJ0B2J4FGYFwOpFgo0XMh6VwDX9bjvV5Ycbz1+XWaQ/oLXZzNI7W/yzHnoFPYpmqCOixhaNGkFrKDTC7q/FiR0OwBOoRW0kIKsgAshFQFQy3OQJLiihg86ji3ydDCGtnKbQHoHhFotAI4SIHsk1fqSQn8oUbpgQzzIZlVg91WaHRnrARCmAAdzGqqDdDaG2sgIvIManNBAvmewrrCgTxbjC80WgZ+HjgrcngMEYCUBVakugEh/Ap7AQQllHzOzklLDFZhD+BkHULwlHNIBHIIelOcJwFpASSyC7s/6fchsXgdnCDqGgJrKwgTEh6kDEbJUAQCKG6YlLAeUvROQb8kBeLwG2xYA11ITSDoJvO66lGfQ3zifwMcQ1Q92DDZJM9Yy6LQCnhSsXwdAZuWnvqBbeaRi8tofk8cKXGbgUOclDFZhTgMQ546hC7gBckaNR41+mZa2+o9/dhfbZQbpCS+v03iESTsAEdX6UKn8M2grnW6UY6cNh/kGWkhD0K4pIJTOICdZ9bSusKnpD5cjOPGAi8c91bxsBKyv7V0tC6YroJ2HNvq4OZGxAriCigJOWmDfbECJYgOpoRB0q1YvaI0/BbzMfDqRMehwtSKEl94FZ4hSCVRWHYHxqlHmD6w/UUNTP9MFAW3A2QVqAhtABAkEcNxOBhb88Xy6mdkhjAEQ1IC0rgQhXjMzq8LBQ+edD0K+IXC1UFAC510A0foCJE68UxAVhVronMJ6+N1sBR2rMP2JD84fHFoGnGcpRCBjaEV1KUDaAFapA5+DeCsEfOXU1AHZBeD70egLC8IIuxeCvSuHujY6FQgOgP6kDl1iS0+97NKgHvl6tRPtjS7o6YYcCni/LeiEJgiFB4LHVRCcXejn3Lou1LCt97c7fNRWFllhryJI8tHrMoP0F7w+m0EK/86jmJbdRIu7IwgqPWDIrY5AZA8sVQEgue6E+nd1gc4BnJsBjsAHgUMPUqvOCjIWgAvxwXhdB12uBSFLm7qQF2e6cU8ILd2kMpkOuYibAOArRNsp4USgDdm6MFcQlRtFVEDcZ2AcsFec/IoGUE+UIB7swwkIgFuD1mQCRLgVYCcn4wrEnTGUaexMsyTtPT3YckhhLDv6dSW0nPkbqgua9SHzmjpwj0Qa6IHTlAIZaKLvuA64NX+hHnl4RffDBej8VUHgNAGwL63LFFr8uokKyY6BNRsJpoCZm1ouHchEVsHhWgG3RgSZqxygAhVIyxebif4GKCCA72wZAK19AjKDtM2yrvuwBU5dDjY/AWK6akfndA4lRQ9yKB5o3lWgwyD39EwKlrqem1Ar3FCjRfKyjDmJLoSrB7o/dh47u9I4t//4v/3RZQbpf/Y1mDnmPlRiu7NWxyeoQ510qofMKThDJXjhIWAVih3g8ZgD8R+UtepKQGvDsWZ3HGgl3gFF84sXQIvpBghOfrlm1mZnipuYnoGKs6+btFFRqxSP9dBPARBVUBUFNMJSX99bA0gwo0jXwbipoasHjOUpSKsgKh2kVawGTpMD6axTkLVpqYVsNPW9LeD+3LH+bqsO9+zqvMSOGteM9AIINbCroKFpAI7oEMqMUG4Gzj/zHXofZgvocqqvdU2nSyUaC68qv8x6CQcZqML7gGBd7WpAMwf26qKth9ESbJGBCKh5pHuk8zrOAchFcTiAtBEsBjqJlPFJ2npYApTKBpCxqJzr756A89JvqCzLGLT/XCgLLgNILy41M7wCLiMDNvwUAuWiAJoEcPCXa/3dA3CuVm1gvofAp1yoM1SAtmCR6z2PoMV0G0qFbvWGjFUhi3YVJLYGwaM2MClSM/sj+dzj16WD9ITX6aJqzkMp5a2KLu7VTBdKBqABqga5cAjG0FqfADCyBIeLCCrBZzKnDt8XK6Pcwgf8Uqr4KsJITI+VXMxSXfDelh4S7aqWtVa55rBT6vghyuJC35GzC5pycDi1idUgA0FXKP30cj1Q15DiiqGnv6iC8QL9OAhwLe4Q5kodKR8wNVtL/d3JHtBWRJRx0LW7BTiiFfgzQQeEzgpgDgeiyBKMf1RqL3YOgrghCGKamTVB5688h83kHcpQt6lruqypw3AGZYVGCCz8QKJ2Bkj3FIgO276umYsEiE6hFFf3AUsJ2fGyBYEjlLAD4PGpRvq5C6BGbwH4ugLSNtNXdS/dBzqFCmQ2X50BDgaIJ5cAwq/D96UAanJiwBtB622w0UxpWqqj3AIZoBW0jtWBIK4DHWsTYHFdR7DugTm8B80ra5A2CKpqy68AR9br36R/G0GjSvfs0TW+2XDQ8/h16SA94RWtYnMeStkuQXwwi9UobUcaQR5Der6ogxbOuRq0ArJKQQNKYlfUAFVWUIPZAq6VGDADMz1Uqx3gCgHsUxvU1FehprMaoBtkxVfK0O5APczXvxFKD6uOjL1wR521LNWUbkFMe6AYns7UCO8A27QP/bYU9TqkFZeqoVrBIeuCLtwGeEamUFbJV+Ccemo2Bn3N/GVVXeObIwDIEjX0QO8v39b7cwo9ELZBgqHa1nmJa2pc43PNsKR9dpCqgb7j9PW6X0vQcnPA2VgW6mQedjXImQNPzmKq99hp6n4FvWlb7Os9hxngPaj6lUHAACXYOpBYBQ1goDflkSuhFfVGrnZi2FGH9+RU58/r6VzlAD1YAdVJAG2dK2iCcLu6jjLoZAiB2rzo6dqPeiAuOwZWbwCt5p4GET3Tfb2GNVSp6ry0Z7pv6sBhMIMsZAZdcd1Q7fZWovf3hqcVu1GFv+0t9H140aPPVgJWi65LB+kJr3QvfKSLrQnAviqkl/9/7V1pkJTV1T69vr13T890z8LADIiMiKQELBETMQaEGCtijKXlkqACmmhipRI1aIioSSRliGvQSqIOShJFfiQSJSiKKAGCSBhAQPYRBmaf6X1/+34//OjQcx50ZGAc8DxVVsKdt9/lnruce5bnJA18Ya9Ng8UZPNNQDQjMYoBPx8lZULPA0kTglOUHmTI5Px9UCaBI2Y3gtJ5H9ZR4BHApqN8wevh41nbmEL7gHtL59/pAll2rjb/zeaX8e6PAdH6gg/cfOlEFgI8/7ua/PeTlm501yheHADjJdYI6WD4QR2R08sXVqwOXLKjd5wekepkgd3nmADVBKWCAs4OadzHg8rCAoqC6zpWcEkSGCDKhwjEQwAuoLDQnV+qMDlRkg6gT1EjwAp4Xt5nfE3HxDAK1DmOAHdoELD4KuJczwFVjsHK5l6S4ctuV5Yq7VePza4iN93UCWF5tIB4lBNy8Xynj7jmXxr93eyc/0Fgb+eZbCbjRsjagkINU8zhQkEygaKwVxH8NKeW/rSBubXPV8LnpcPPrDECZJxdwC3bx9SQd4W37ARGa3etjbTFQUaG6mvdBhx/sIWmumFVpIPzCwmXuqeHf6wC7oRskRfkB54CjB6VMApEeA4iC1EeUBGrJeFSQc8bET4sWULE9GucCSpaFWJung2/cWSeYBCUg3b4TxAyV8ZNXHCglWjsfjBHgsvNU8wXXC3zPgOuNzD4fa3MBWl9/ltexaAYZSAmwWDcrsNDnuKtQdYKSH/kQa0vawYkUZaxk+MRVIN22EqSkp8OgNA2ILfIbgdsNrKMZB+98wwF+Ghvk4+/ssoPK52bezyYQCJrReVamEVhTXRlkYufP0BNclkbAj2oEh4AAYGYMARn5zNwdEQYZp0RENTpXFGNGfsgZFArx60C+c0uEj0GHgSuZGkgeSILsNEuYKwxnlvF5cxjIfZiTf5stBso/gOSLNOBkigCPrscCijKj9P3WvawtlOIKiBNYkLMgecUP6po1AZedzQgSAIAnKVLC11RnCZ9fuoXLLQ+IpEwmsD5led/HAE9bChxYE4r3aQmBd9b4GuOo5fdLZUGtTRD2YfPxA6YjWMvavF6+/5RnuNxK0iD2EZV0AhwhTUOKZZ4CyQoIoiD1EZo5R8ajuFN0jQeSZUG9LbeJ+1gTScB/UwV81IBp1QkY+PJ1wNWQB26tKL8uPJQvkN68j7e1c20dxcYY0MrSyTejWAWfVP9C1ca7QHxVdAdr8wFfewiQkPkdfLFO6nwSlYIMjo+ASycHgiXLwAkoaeAMi/ah/ERqioLisiA+xdLMlQiPGdA9gMxAdwhkTOpcQVdxlHrDA5Htfm45UVF+gNBB3F4yDszkFtDWDQJawXV6N3+XQBWfM8BrQeYQotfGC7G9mY+Pg6C7QoDQzw8oC7x2LvcsSH2yoBIYVXx5T3fyMTMixcdCxACsXll+vzaQDRkivrlVgkxPP5B7UxdXDprA+lQK4mpygBMhEhnG2jSNz+uAC8R16uDk4+R9X2biv00C62Q0y/eB7k6ebu9V/P3Sdj4P03beBykXH5ODuvg7Gyz8IFpq5Rb9TjD2A6XcS+IHsaNDBvH7GQZz61gOhA9YwOGDwmDcqxBrswGLrTdUvAZaE8eg7ugBUZD6iLTJScajXAQOnS/OCZBGb3LzE7Lbx8WRBxk0WeDzNgG2Xy3MF68uYMb3lHBlzdTNN2k/sIQZczzIeHfrQdaWKgWcHaAOQPZDkDLr4d/hA7xFCQ9XDjpB5o2xncsjHuGLjdnGF4L9HcDvD2Iz7CY+wdOHavh1bm6GzgNizAQ4MXvAqdwZ5haHlJVvHIkQ/63RAU7+IOYileQB8l4dZPIhJdZaC54B4rpA/AJiNciDU3kSBK8ac/y5mRAfpy2l/H7WBJcbEVG6i/eDBcSPmUGciRn0ayTAfxxxoAr1XPCeOKq9AbIwwbzpjvNDnQUcIvLAPeICGXBBYHXU3CBTMc/nHIHDRkUt+LYoVxjswG/pDnJFpRRUYO4G2VoOoCB5MmBuGvjhIBPka1sqxpUSI4hLSjr5c7NGPvhLW4G7LwU8E1YQuB3gls4IKBA4rBRYqTx8HARBtmsAJDE4dP6MWCu/nxGUQjFlwELr5AoXrC9j6jGurL2rdi4KUh+hHClS2lGDHMSAVIEg1KR3K2szAqbgfA4ExAHufcvHXGkKgdhmTwV/RkuODxaDiStISRBzAUIayIaUupgPtPEJXlrKF0ijG6Qmp/m7mBLgxAwy1kxBPiG7u/ipo0Tn/ZwFwX15UC8vD/iXXEEeRBqOcytGB2Dfc4JM/Q5AyJkDAe1RQDxZCsiRQsB3n8ryRd2WBKVfQKxMtgzQLhzmfeUA5ICdYKNMW/lccIFMuYQHbBI27qZVOld8ykBsQqQNL5NpkGhx0MI3skGovGAFqMUIvs/v5OtJBJRwiCISxySgDcjw59q8vP81M39GGpBbDjuTb1AlKf7bzv28X1qAAlICDIKxQyBmCGS7lfuAMglIA7MpPvZtoMqwDpigkyAzMA1qyeTC3DJk83ClLmjj8m0DmbIotT5lAW43UN0+kAUW/VaubFS6ACuszg9cZxI/FJcP5i5FLQr2gTAfB2UBrg3FgaXOeZi/s6MGhIIAS6LWI14zH8OJFz0hClIfEUwfJtNRPl6LgS9yiOsmHeJWm1Serw4BEB+QASbdtImfKt2DAJEdSHc2gxLwDlAp2gzInFPAzGu086A7B2D6NWa4Od0MrGg+K58YhghvO4Qq3rdxRSoLGK29Fq6BtAKF0A4K+3qd/DoDqKkRA7EoeQtvQ/w8aUAZ43Lz8dKY4uPPA+KrDiHySEAlkAsCGocyoOSAlcQOCOCSIL4qCuKDrICuIGfi49Rg50FI6SqQrtztY22aA5xIwZJYcibm0nW7z2JtZwCFErkzcmW1rC0OAuctNr6QlwCDWwrQE1AOBC2beFxYCsWUgQO26uCb+eEWPv+7QchWPgjcqIhcFFjWbCYfazOYUdFdbhGMgyC1FHE3rxlQsZCdWyI1xFcCvsMIYovCLXxMH7KD5AEF4kmjfGyAcDICRn6yOrnyYvGC+eXiikqVh1sXg+XcQmhRfBxoKX4AsZZwJdFi58H/ZAJUFkP5+qQD2oVaQNmQ7ZEkFAeHBwRRkPoIe95G5qPYd61Gbm7Ng5gNF+AK8fm50PKgZpYZ1OWyuIGiAugAIsCC5ADZVaj2T8zPJ70LkPdZ43wFzwDWbPIAvz8oixFLcLOsrZqfEuztvK9M4LSdBazZ0RRvs4KyAgQI4FoAS3AoxxdhYwgs/oARGCTPwfJn+m7+bYMquDKEKsdH0nzz7AYLfVkzf2cnqi3p5BtRO1AMfOB7/WA31qPAn+bnC/1Xz+Gn3hLPeaxtRyuXxw5wwq1OAbePG1XOJYrZ+MLucnHh2VDgsbmRPybKNQtLGiz2oH6XpnhfB/KAPwjUJjPFuGKRsfADnKeKrwm+Lj7/jVkuuxggCKyIAr4kE9+kfaCs0D6QTeoGZTscIOnDmOcbbTwJyhSl+PtlgLWNAH9QWgE+LSfgwwOuJAJZkxkQZhBz831F+UF1AqBw+UHZngmD+By2lXClWNNreRtxa7bu5+MgDciG0xbeCQ4CcX9A4R8EGMZjgFvKlip+l1xGgrT7BV2htiL3ZsDJd4AwSOHKAG4PlO1m8/BN2tXOF/YckKTDDIJugSVCbwJxOmVo5wYnmzifkCC2mbRqQMACqjdagXk0DhQQZyfg8QETzVbHF5buQyCgfR/fJMpMfIFsBzEmNpTkBPZ3RKFkAiz3IIwAJNsTgao2lAHrSghYi+wg3kjrnYgoBIiSzwBuizKnj7V1ARN71gay2ADxZBYELO9u4t8xrIK7N756WS1rq8nwE25syzLWNqocxyBV+/mGEk5zRWxViFttol18MOS8XLk1uPicc4DgfHcXX09SBq7A6Ypv0lYTyK5CtTIivF9NoHxGVuP94k4A13QZGHBggeoEiSWlYI4oH+C1AoVzE4CB2lrJx5YblO1pB0SnySiPRdN9PGHEbuQs3DUR/n6tUa6kZ4K8r+xt/P38aRDrVcHb6obwd/ECxnKTdRe/DiQJWKP8YJAFSpO5hL+LG5Qf8rTxfu4AcdVtoFZcTOP7qK9HIfM0PHJyiILUR6gY0dFZ8kZwYtFR4UdQXNYIgi9TbXxhSdm5Oyid5IuhBgKjrcCf73CAE9VBHi9DIT64PwaD0QFiFUzb+O1AYhY07WtgmEbASdgMFJo84FDxpvnEjaI4UGBNt4BuSYNsHBcIcnUEwOkd5OWfO4grG10pwEsFakUZfXwc2IHrBiQGkgW4I4LV/P3KrNz1MKZuOGurrOHP3bWJb8YfW7hikDbzgTCyCrAng022EgQT+w6AzSTHiUDbrXxuNbbiLLbQIT6oAxVcgz5f8/Hfgri6HOBQyyVBiR4wMNvSIOMSEEraQGwRZbm1yAcU3ggoNKqDArtGJ1dyUlneL50ocyzG+7rGAjT8Wr7u6Ck+9s0mPm+8gDMulUQbJn+XClDoOlMClD9AseI2ccVHB+ZiqxMocB2AZyjIvzcI/G4jXXxcmQFXmLON93MaWdE9IObKzcdVPAX4iEx8DidDfFFVQR7fp6f5OxtyG1mbNcFPcKEee0gClPVBEAWpj2gmKjKgoxM3Khaqt3IBuUEYAbAGU4uNDyi07nlAzJAZmfvBdagUVgLUmQOcixRHRQ9BkLEPtKX5YYzyqLosOEGCzHBKIlcN8N0T6HtUFgoWfgXK0DBQufvKq7/N2rKgJEHjx/zUVnEQVNqu48qf38XTmlft+y9r2x3hneUD7LqWHO/oMjtg/w3ycdWZ5u9XeTZXskdo3HUWyfCBYAfJDh3dgHyzmfPmpDVuITQBHp4M8U2n1AKsqUQUBa6ujzr44O8CltxAjt/TqvEjcqaDx3uE8nzigPq8BOiIKN0I2MiJPyMLSoiYAO9WtoXLM+NAVio+3sp2874CVWzICDL5EoDKwY1SnyyAud0KSn7k+LuYTSC7L9/I2uJ7QGIJeOdUmo8DHQT6O0DmSzXgXwsDYtfqbh5A7dXOZG1WkI0XT4CwAGDh7jgMssTMfDybXMBdDWhXDBGgUINAdRVCFRpAXT0QqE7eHsou4OpDMCiFqggKPguRSIS8Xi/RSAfRUaVG3Mh6AjJMskCGcUQ4COY8Ki8GqDOgIuACijNIwiJAyUIgI5VAUgdUXkDMMhlQICj6LUhnpxBoqwC0ykGgIHGDBYF9EX8vKuyO3hn8NljBFy/PCK6UeA6DQsFubrI3gGCgJMhwjOtcAza7QOIAUHyMwDJh6+RLhr+Em+xLQQ0tD8gW6gL3MxoACaY1xNpiKVCSABApWmy8LaEAIV8alEcwowFIFEX8UqAMuSXFB7oLKNDdLv6cQDcfcO5qPo4a45xP60ATn5wmI1c8o4D92wH2wBjgN8qG+eLhKeN9UJH2sbacgX/vIbBZEuBQcoEAaqOVT+wuJ7fUBTPgewHhZRQUxzSgCvU6H0euEr7ApxL8gKkjklQfayKHFVE78AXeVMXHuQZoDWqN/H7DQXyQbuPu5SSIwxqS4d9rAvFQncAiZQZWJc0JNLMQiLkCUzNZxTebZKT4t8lEhm6/cTGFw2HyeMCzjrzbMf8i6B2iiaINHNARUQZtqsjKAlw6SP8AxNeEjCwErDGg4kLvFR9goUGKlBFYhvLISoX6Bb0L2p9qQRuyFoF6uBDoXVCfImVyDNBEo7xjkh5usm/j2efkq+YnejvgHgkk+OLqa2xkbZoTmMdQUCpQ0N0Orrw4fLWsrd0IqCzS/LcGc4i1+fP8pH7YCII+Q/yEGwJWqjYPyIQEG0LCCjKcQHyPEU1WIsoBZulEhveDE/DBdILq52XEN6PyID8hrer4gLXFO8EJHig5CVDyp8PGN3g3N8IReYGynAdWxw4+3rLAFR+v5H1d7eOaI6p/mACLUR4sHjVRsCGDQ2I3yOCyx4AVQ+PxZDookqt38vmfBydWFKOthUGaPyAljQELjQlQJwyPcDej3+9jbSkNKHDdQOZgPW4BTN96B79Qr+Rz0xDjMvcrYDX0hVhbRQxkYO4B1tkhxXPYkhcepP6BlYo29Qyw3IFSU6QDi4oeAvdHEgLKBlRoUBIWslKh5yILErJSgcmS5/sYfD+wpxKolwoDnqEVCH2bD7QhJQfZUVEfAF4yigFz4HDeZgfksNEakJoc4Q/OgIBMAuUgkA47CJAulum8E7oAS3gamMQbu/lC6gGcR/vzvA8qrdzcHwHFOU1JQM8A6gOCxE+KprhCkwCulnAGkAOeBUrJfAzqmRCREeidFp1vlj7gbjGWcrci2LOoKdrAGwERpgUE77njfANt9vCxMAgsMtoZwJoIAoW9ICOUQOWAbg/va1s3XzxCWa6sRYB7yR/kv02H+GaZBItq3glihlL8ulbQp0YD+A5QziiDuIwy/DvMeb4RZEGqZxL49gMZUC4IZGZ2gUQfZ4jPER0cuNqB9a6kkbcpJwjw9oEyVDYffy5gcu8CcZNahP824gD9rPFxr5qK56BCoRIAoiD1FWkqVhKAggQ4vgiEAhGgjSGKgtB9kNUBrR3IcghiFUAZJwLzG48WtCMjFnfwfqCQPXZXof5DcUToO4AVDb4fuh9SMFGsUog3GRt4WzfgYTMaQX0rEIMADD4EPLc0yMYX8CbgIoqDhSoPFutwK0iZ9XMNvaUV0AaAyt3hFm6a8Ob5dUawWDuACzCQC7E2Myh7kOrkwqwGfZUCTORRt4+1ERHZcoCXxQTahvAT8jg732Te2Pkf1pZt4f1gN3CzqNcE4rjAJuNJ8b4JAa6rZCefiOWd/F0O5LiiHQR1+WzAwpADqeb2Uq5E5HRgjf0YpJ9n+YRoBuzanhA/hdlB8LVR4wsyKs6dBXXcFKieYE3w+mc5kBlMwAIfCHMlezDgvrOm+KHJkOU3jJaA9Pgy3veu8H7Wpp/Bn5vNgXqeZfxgkQBcWgY/sCrl+HxtMvO28iwoXQJqLFpUsdKeBNmcCKIg9RUOwm6ho5BHGzw6lAIXG4HFHlpAgDkdorfWJ/RNyEKDFLPeJQhg5QUpcOC5RjByYT8j9xxSpEDfA4JnsoC+z6CwCSCPNDeAkBEQwIEwGDIAIkuvn7vs8iAGyRUBVBEpUCQTZDh2D+YJASkT32BsLuAyAu4IO6iuHm7mG28gzxd6VEhWs/BOdQ7hJwNAnk6HzVxwFrCRVxp5ejsRUdYMFnsg5BpAkrp073rWFgPmUw+IxYqD2lWHTVxOdhCYmAV8IJZOECOF6ouBzDYjWJ+iiOEZZawC5nvVCVx2qB4dqKcWAW4ZLcXngw7coGkzcBE5uTUrD+LJPKBorBfUnOm28cVXMwCrUo5v+lYDV0AMYOFxa3zsl+n8JBrRed/nOoGbHBwc/V38GQ6QeZtrAe75Uj6vUQmRDCiI60nzdSKZ5GugBWTP9bT2GpW42PoHGSrehFFGMNr0kUsHcOxARQopKsjyglxEaFwgxQwpEQjI7dbLmKtejz5El4J+i8ymSFlD1jvkKgTfAbwHRCDgFoTGQBkhVzigXyGK8Q7MgXEQDfMBY03wF0yDk7WhnGsRBsB54vgIbNoOHjdF2/jHfVzDO9AC2mxAbrYQiCNKAloNkBHgBIzFQ+Lc3Ucgbb1iCC5secYwvvE0bOcbwOZ1/BR+QOepyHmgAObM/H5JQLqaioNEEFQ5AhScdSDSUGDFDGf5xDGBUhkWkJZvBmbWJCipo4GyOBEwvzIW/s6mEPBhAxLHshiaYHzA+bv4OEqV8v5zR/j9un2gQn2Uy8ji4s8YgyzXIDOQMoDwNs3j2OKg5L0hDGKLkMsZREnZfKAqwkG+4OkgRkp18uviwDKZAHFnJSAEIJTi94tZgEWvB9lwGgTlI4iC1Fe0UPGGiywWaFNFikUjaINRfKANTSpkaULKFVIikCKFFDiwJsG23gL1FVKu0PuhfkFAv/WBNmTh4p6MXmexaSC4Pg/61AM2hBgI0k6DjBqfFbgPAJeWAgG3iHQtt4UrSAkQFKaByFe9kr+zxg/lZG/nz424QB08QDZIoO5appMHwcWJp1PvoO2sbVQNt9QNrbqAP5eI/vkGtywd1gEFB0iZ91kaWVsuygdmEpDodZcAZQjw0IC4dJg5SjYuz26wM9SAtai9GTwXHOoyOX5KVC6+aCk7sGYBK1USBEHbwZqaBgpczMQVXmBkpTTxwQrCeShU7uO/BTcMg1i0IFDmD2jAygcqU+sgMN8GuF1KQBka4AkmYxvv+0Pt/PTsBWZ0j5mvRUkQF5AwAplrfI0xgDiIfaAcjBkQ3iIXaloV91U6LRak/kHP9Rn1KLLG9DJIDFqawCYDgSwlvVVekLKBlJcTDaQQ9k7Zh/Ff0FXIQwGw3FC2YC+VIaQoI04RkEhFgKaFgJuekkO4kEBICO0NA+tOKyjiiUrRG0EdsTDXHFULH+SonppLAxH3RvAdoGYbCEWhJNh4rRrXqH0653FwIgJSA3/uv95dxR9MRKEwn0zI9WsyA/4WoIySxhftPOB5sYQBOSsouaChomiArT8L2LqD4F0y4HY6WJ8SQMQGKwicD/MFKpvg/Y8KuvozvO8zWR9ry4NK9kiBq9LA4AJm4AgIWnaGeSeMBwVdcyBbxwIyHN2Ai0WFuevW5ADJDcAllgFxdc4IX8wzdj5OQV4EhYHFh8C8Nrr53DSAWpFRoKzYXWAxB3VE88DcnkU8V5bieLI0Ig4EEAXpRKN3imnvXVi9vR9Cbxmu0ChAikB/ALn7eukSg32KrkMWqV5SGMAgbaBXIK6y8iRfgIItfKJmgJUvBZRiLcQX/wRIoyewwWQrwS7WzHcOl4VvEglQGiQJYoZIA4Izcw0YxOpSvgyYzlP8nQ0eIMw414AHlYVYW0WeC/jwTmDN6kkydwQgngrFdqV03v8Z4OLM6WAggeDyvI9Pzi4QU6JZ+Hgz54ElEqwTAXA4iMZ4o8nEf2xK84mYcfCFDMQxUwWYdO1xYE108L43OgF3FqjP5kmC+oxNvE91LwiCtnAT8jB7iLXlQBWD6lZw+uvi15mS3KKSMvFxnikBbkbg7iMwNnSQoZfIItctt34OL+NKXUTj32bQAU8YiPWy2IGbHNRvsgP3cBKcmjJeEF+ZLV6fjMCVjiAK0nHitOLX7IsS1h+AlVp7+Vv0bSHQBsJMzKN5mxPs+chtkQD3M3bzhTkJFLM4MH+DotWUBWthphxk2aR5ZxlAMLLqAnXwDIB4zsnvl6oGm3s7qNkEFDgjCLRLgcw2u5l3qhOkilfp/Lk1dh9r27EDyMPLNXRTFjNpm8FG2+Tgu345SA1PA46AjIVrwZYcUEBA5GwWpGHaQEYG4OQjK2hrAwdst403WpBbCyg+KUD94de5PNtAhq6GMqRAvyTdgDixlG/cySRXNqwWMM6BxaLCyE20JjtXmrxZbqHtNHD5JkGpIQIFisuB2y3SwRcAd4b7591GPqbTSX6dArxUJaDiw0EwTk0xPg/zVh9rqwSn2LiFZwtGzHzApBwg2QSk/EVACEC2h5U6/f+W2c/ax4VJ+zjR1NREgwdzM6pAIBAIBIKBj4MHD1J1NYq5+ASiIB0n8vk8HT58mNxuNxkMyAd06iISidDgwYPp4MGDn0rDLugfiDwGFkQeAw8ik4GFgS4PpRRFo1GqqqoiI8pm+H+Ii+04YTQaP1XzPB3g8XgG5OD+skLkMbAg8hh4EJkMLAxkeXi9KKC0GMdWnQQCgUAgEAi+pBAFSSAQCAQCgaAHREESMGiaRnPnziUNcYMI+h0ij4EFkcfAg8hkYOF0kYcEaQsEAoFAIBD0gFiQBAKBQCAQCHpAFCSBQCAQCASCHhAFSSAQCAQCgaAHREESCAQCgUAg6AFRkE5BPPPMM/SVr3ylQMI1YcIE+te//lV0zbp16+gb3/gGOZ1O8ng8NHHiREomP6kKumrVKjIYDPC/DRs2sOft2bOH3G43+Xw+9rclS5bQWWedRTabjUaPHk3Lli0r+rtSiu6//36qrKwku91OkydPpt27d5+4zhgA6C95KKVo/vz5NGLECNI0jQYNGkS/+c1vip6zatUqGjt2LGmaRsOHD6eFCxey912wYAHV1taSzWaj8ePH0/vvv3/iO+ULRH/J44033qALLriA3G43BQIB+u53v0uNjY1FzxF59F0eRES7du2iadOmUVlZGXk8Hvra175G77zzTtE9Dhw4QJdffjk5HA4KBoN09913Uy5XXJtQ5PEJ+kMmmzdvpuuuu44GDx5MdrudRo4cSU888QR7lwEtEyU45bB06VL1+uuvq127dqmdO3eq++67T1ksFvXhhx8qpZRau3at8ng8at68eerDDz9UH330kVq8eLFKpVJKKaXS6bRqbm4u+m/mzJlq6NChKp/PFz0rk8mo8847T1122WXK6/UW/W3NmjXKZDKpRx55RG3fvl3NmTNHWSwWtXXr1sI1v/3tb5XX61X/+Mc/1ObNm9UVV1yhhg4dqpLJ5MntpH5Ef8njxz/+saqrq1Ovvvqq2rdvn/rggw/Um2++Wfj7vn37lMPhUD/96U/V9u3b1VNPPaVMJpNavnx54ZqXX35ZWa1W9fzzz6tt27apWbNmKZ/Pp1pbW/upt04++kMe+/btU5qmqXvvvVft2bNHbdy4UU2cOFGNGTOm8B4ij0/QV3kopdSZZ56pvvWtb6nNmzerXbt2qdtvv105HA7V3NyslFIql8upc845R02ePFlt2rRJLVu2TJWVlal77723cA+Rx//QHzJ57rnn1J133qlWrVql9u7dqxYtWqTsdrt66qmnCvcY6DIRBek0QUlJiXr22WeVUkqNHz9ezZkzp9e/zWQyKhAIqIceeoj97Z577lE33nijqq+vZwrSNddcoy6//PKitvHjx6vbbrtNKaVUPp9XFRUV6ne/+13h76FQSGmapl566aVev9+piBMtj+3btyuz2aw++uijY/7unnvuUaNGjSpqu/baa9XUqVML/z7//PPVHXfcUfi3ruuqqqpKzZs3r9fvdyriRMtjyZIlymw2K13XC21Lly5VBoNBZTIZpZTI49PweeTR3t6uiEi99957hbZIJKKISK1YsUIppdSyZcuU0WhULS0thWueeeYZ5fF4VDqdVkqJPD4LJ1omCLfffru65JJLCv8e6DIRF9spDl3X6eWXX6Z4PE4TJkygtrY2Wr9+PQWDQbrwwgupvLycLr74Yvr3v/99zHssXbqUOjs76eabby5qX7lyJS1ZsoQWLFgAf7du3TqaPHlyUdvUqVNp3bp1RES0f/9+amlpKbrG6/XS+PHjC9ecbjhZ8vjnP/9Jw4YNo9dee42GDh1KtbW1NHPmTOrq6ipc81nyyGQytHHjxqJrjEYjTZ48WeTxOeUxbtw4MhqNVF9fT7quUzgcpkWLFtHkyZPJYrEQkcgD4XjkUVpaSnV1dfTiiy9SPB6nXC5Hf/zjHykYDNK4ceOI6JO+Hj16NJWXlxd+N3XqVIpEIrRt27bCNSIPjpMlE4RwOEx+v7/w74EuE1GQTlFs3bqVXC4XaZpGP/jBD+jvf/87nX322bRv3z4iInrggQdo1qxZtHz5cho7dixNmjTpmLE/zz33HE2dOrWo+G5nZyfddNNNtHDhwmMWG2xpaSlakIiIysvLqaWlpfD3I23HuuZ0wcmWx759++jjjz+mJUuW0IsvvkgLFy6kjRs30tVXX1245ljyiEQilEwmqaOjg3RdF3lQ3+UxdOhQevPNN+m+++4jTdPI5/NRU1MTvfLKK4VrRB7/Q1/kYTAY6K233qJNmzaR2+0mm81Gjz76KC1fvpxKSkqI6Nh9feRvn3bNl1EeRCdfJj2xdu1aWrx4Md16662FtoEuE1GQTlHU1dVRQ0MDrV+/nn74wx/S9OnTafv27ZTP54mI6LbbbqObb76ZxowZQ4899hjV1dXR888/z+7T1NREb7zxBs2YMaOofdasWXT99dfTxIkT++V7TnWcbHnk83lKp9P04osv0kUXXURf//rX6bnnnqN33nmHdu7c2S/feCrhZMujpaWFZs2aRdOnT6cNGzbQu+++S1arla6++mpSUpyAoS/yUErRHXfcQcFgkFavXk3vv/8+XXnllfTtb3+bmpubv8jPOqXRnzL58MMPadq0aTR37lyaMmVKv35nX2D+ol9AcHywWq00fPhwIvrE3L9hwwZ64oknaPbs2UREdPbZZxddP3LkSDpw4AC7T319PZWWltIVV1xR1L5y5UpaunQpzZ8/n4g+mRD5fJ7MZjP96U9/oltuuYUqKiqotbW16Hetra1UUVFBRFT439bWVqqsrCy65txzz+3D1w88nGx5VFZWktlsphEjRhTdg+iT7J26urpjysPj8ZDdbieTyUQmk+lTZXa64GTLY8GCBeT1eumRRx4ptP3lL3+hwYMH0/r16+mCCy4QeRyFvshj5cqV9Nprr1F3d3fBmv3000/TihUr6IUXXqDZs2dTRUUFy2w60q9Hr0cij//hZMvkCLZv306TJk2iW2+9lebMmVN0z4EuE7EgnSY4YmGora2lqqoqZlXYtWsX1dTUFLUppai+vp6+//3vF+ImjmDdunXU0NBQ+O+hhx4it9tNDQ0N9J3vfIeIiCZMmEBvv/120e9WrFhBEyZMIKJP3BAVFRVF10QiEVq/fn3hmtMVJ1oeX/3qVymXy9HevXuL7kFEhft8ljysViuNGzeu6Jp8Pk9vv/22yONzyiORSJDRWLx8mkymwrOIRB6fhs8jj0QiQUTE+ttoNBb19datW6mtra3w9xUrVpDH4yls9CKPT8eJlgkR0bZt2+iSSy6h6dOnM0oSolNAJic9DFxwwjF79mz17rvvqv3796stW7ao2bNnK4PBUEj5fuyxx5TH41FLlixRu3fvVnPmzFE2m03t2bOn6D5vvfWWIiK1Y8eOz3wmymJbs2aNMpvNav78+WrHjh1q7ty5MM3f5/OpV199VW3ZskVNmzbttEvz7w956Lquxo4dqyZOnKj++9//qg8++ECNHz9eXXrppYVrjqTM3n333WrHjh1qwYIFMGVW0zS1cOFCtX37dnXrrbcqn89XlP1zqqM/5PH2228rg8GgHnzwQbVr1y61ceNGNXXqVFVTU6MSiYRSSuRxBH2VR3t7uyotLVVXXXWVamhoUDt37lR33XWXslgsqqGhQSn1vzT/KVOmqIaGBrV8+XIVCARgmv+XXR5K9Y9Mtm7dqgKBgLrxxhuLKDPa2toK7zHQZSIK0imIW265RdXU1Cir1aoCgYCaNGlSER+OUkrNmzdPVVdXK4fDoSZMmKBWr17N7nPdddepCy+8sFfPRAqSUkq98sorasSIEcpqtapRo0ap119/vejv+Xxe/fKXv1Tl5eVK0zQ1adIktXPnzt5/7CmA/pLHoUOH1FVXXaVcLpcqLy9XN910k+rs7Cy65p133lHnnnuuslqtatiwYaq+vp7d56mnnlJDhgxRVqtVnX/++eo///nP8X34AEV/yeOll15SY8aMUU6nUwUCAXXFFVcwZUrkcWLksWHDBjVlyhTl9/uV2+1WF1xwgVq2bFnRNY2Njeqyyy5TdrtdlZWVqZ/97Gcqm80WXSPy+AT9IZO5c+cqImL/1dTUFN1nIMvEoJREFAoEAoFAIBAcDYlBEggEAoFAIOgBUZAEAoFAIBAIekAUJIFAIBAIBIIeEAVJIBAIBAKBoAdEQRIIBAKBQCDoAVGQBAKBQCAQCHpAFCSBQCAQCASCHhAFSSAQnBZobGwkg8FABoPhlKz1d+TdfT7fF/0qAoGAREESCASnGd566y1W3+lk4KabbqIrr7zyhN2vubmZHn/88RN2P4FA0DeIgiQQCE4rlJaWUmlp6Rf9GgVks9leXVdRUUFer/ckv41AIOgtREESCAQDDu3t7VRRUUEPP/xwoW3t2rVktVo/t3XoiKXn4YcfpvLycvL5fPTQQw9RLpeju+++m/x+P1VXV1N9fX3R7w4ePEjXXHMN+Xw+8vv9NG3aNGpsbCQiogceeIBeeOEFevXVVwuusVWrVhXcfIsXL6aLL76YbDYb/fWvfyUiomeffZZGjhxJNpuNzjrrLHr66af71kkCgeCkwvxFv4BAIBD0RCAQoOeff56uvPJKmjJlCtXV1dH3vvc9+tGPfkSTJk363PdbuXIlVVdX03vvvUdr1qyhGTNm0Nq1a2nixIm0fv16Wrx4Md1222106aWXUnV1NWWzWZo6dSpNmDCBVq9eTWazmX7961/TN7/5TdqyZQvdddddtGPHDopEIgXFyu/30+HDh4mIaPbs2fT73/+exowZU1CS7r//fvrDH/5AY8aMoU2bNtGsWbPI6XTS9OnTT2jfCQSCE4R+KYkrEAgEx4Hbb79djRgxQl1//fVq9OjRKpVKHfPa/fv3KyJSmzZtKmqfPn26qqmpUbquF9rq6urURRddVPh3LpdTTqdTvfTSS0oppRYtWqTq6upUPp8vXJNOp5XdbldvvPFG4b7Tpk2D7/D4448XtZ9xxhnqb3/7W1Hbr371KzVhwoSitvr6euX1eo/5jQKBoP8gFiSBQDBgMX/+fDrnnHNoyZIltHHjRtI07bjuM2rUKDIa/xdRUF5eTuecc07h3yaTiUpLS6mtrY2IiDZv3kx79uwht9tddJ9UKkV79+79zOedd955hf8fj8dp7969NGPGDJo1a1ahPZfLScyRQDCAIQqSQCAYsNi7dy8dPnyY8vk8NTY20ujRo4/rPhaLpejfBoMBtuXzeSIiisViNG7cuEL80NEIBAKf+Tyn01n4/7FYjIiI/vznP9P48eOLrjOZTL37AIFA0O8QBUkgEAxIZDIZuvHGG+naa6+luro6mjlzJm3dupWCweBJf/bYsWNp8eLFFAwGyePxwGusVivpuv6Z9yovL6eqqirat28f3XDDDSf6VQUCwUmCZLEJBIIBiV/84hcUDofpySefpJ///Oc0YsQIuuWWW/rl2TfccAOVlZXRtGnTaPXq1bR//35atWoV3XnnndTU1ERERLW1tbRlyxbauXMndXR0fGo6/4MPPkjz5s2jJ598knbt2kVbt26l+vp6evTRR/vlewQCweeHKEgCgWDAYdWqVfT444/TokWLyOPxkNFopEWLFtHq1avpmWeeOenPdzgc9N5779GQIUPoqquuopEjR9KMGTMolUoVLEqzZs2iuro6Ou+88ygQCNCaNWuOeb+ZM2fSs88+S/X19TR69Gi6+OKLaeHChTR06NCT/i0CgeD4YFBKqS/6JQQCgaCvaGxspKFDh9KmTZtOyVIjREQLFy6kn/zkJxQKhb7oVxEIvvSQGCSBQHBa4cILL6Rzzz2X1q5d+0W/yueCy+WiXC5HNpvti34VgUBAoiAJBILTBNXV1bR7924iouOmA/gi0dDQQESS2SYQDBSIi00gEAgEAoGgByRIWyAQCAQCgaAHREESCAQCgUAg6AFRkAQCgUAgEAh6QBQkgUAgEAgEgh4QBUkgEAgEAoGgB0RBEggEAoFAIOgBUZAEAoFAIBAIekAUJIFAIBAIBIIeEAVJIBAIBAKBoAf+DwKghJ1mJDVCAAAAAElFTkSuQmCC", + "text/plain": [ + "
    " + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "client = Client.open(\"https://earth-search.aws.element84.com/v1\")\n", + "collection = \"sentinel-2-l2a\"\n", + "tas_bbox = [1.045, 52.5, 1.055, 52.6]\n", + "search = client.search(collections=[collection], bbox=tas_bbox, datetime=\"2023-10\")\n", + "\n", + "data = load(search.items(), bbox=tas_bbox, groupby=\"solar_day\", chunks={})\n", + "data[[\"red\", \"green\", \"blue\"]].isel(time=2).to_array().plot.imshow(robust=True)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/SciRAGQ5KM.py b/SciRAGQ5KM.py new file mode 100644 index 000000000..b5ea00e10 --- /dev/null +++ b/SciRAGQ5KM.py @@ -0,0 +1,58 @@ +import os +from ctransformers import AutoModelForCausalLM +# Requires SCIPHI_API_KEY in the environment +from agent_search import SciPhi + +def initialise(): + SCIPHI_API_KEY = "528d08dc3ed417f32954509131952c5a" + sciphi_api_key = os.environ("SCI_PHI_API_KEY") + +'''def get_chat_completion( + self, conversation: list[dict], generation_config: GenerationConfig +) -> str: + self._check_stop_token(generation_config.stop_token) + prompt = "" + added_system_prompt = False + for message in conversation: + if message["role"] == "system": + prompt += f"### System:\n{SciPhiLLMInterface.ALPACA_CHAT_SYSTEM_PROMPT}. Further, the assistant is given the following additional instructions - {message['content']}\n\n" + added_system_prompt = True + elif message["role"] == "user": + last_user_message = message["content"] + prompt += f"### Instruction:\n{last_user_message}\n\n" + elif message["role"] == "assistant": + prompt += f"### Response:\n{message['content']}\n\n" + + if not added_system_prompt: + prompt = f"### System:\n{SciPhiLLMInterface.ALPACA_CHAT_SYSTEM_PROMPT}.\n\n{prompt}" + + context = self.rag_interface.get_contexts([last_user_message])[0] + prompt += f"### Response:\n{SciPhiFormatter.RETRIEVAL_TOKEN} {SciPhiFormatter.INIT_PARAGRAPH_TOKEN}{context}{SciPhiFormatter.END_PARAGRAPH_TOKEN}" + latest_completion = self.model.get_instruct_completion( + prompt, generation_config + ).strip() + + return SciPhiFormatter.remove_cruft(latest_completion) +''' +def perform_search(client): + # Perform a search + search_response = client.search(query='Quantum Field Theory', search_provider='agent-search') + print(search_response) + # example: [{ 'score': '.89', 'url': 'https://...', 'metadata': {...} } + + # Generate a RAG response + rag_response = client.get_search_rag_response(query='latest news', search_provider='bing', llm_model='SciPhi/Sensei-7B-V1') + print(rag_response) + # example: { 'response': '...', 'other_queries': '...', 'search_results': '...' } + + +if __name__ == "__main__": + + initialise() + + # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. + llm = AutoModelForCausalLM.from_pretrained("models/", model_file="sciphi-self-rag-mistral-7b-32k.Q5_K_M.gguf", model_type="mistral", gpu_layers=50) + + print(llm("In 2024 AI is going to")) + + perform_search(client) diff --git a/ServerTest2.py b/ServerTest2.py new file mode 100644 index 000000000..d315ea500 --- /dev/null +++ b/ServerTest2.py @@ -0,0 +1,58 @@ +import threading +import queue +import requests + +def print_dict(data): + if isinstance(data, dict): + print_dict(data) + elif isinstance(data, list): + for entry in data: + print_dict(entry) + elif data == "content": + print(f"Key: {data:>30}: {data['content']}") + return + +def producer(list): + # Generate test requests and add them to the queue + for i in range(10): # Adjust for desired load + request_data = f"What is the capital of {list[i % len(list)]}?" + print(f"Request: {request_data}") + requests_queue.put(request_data) + +def consumer(): + while True: + try: + request_data = requests_queue.get() + print(f"Processing {request_data}") + response = requests.post("http://localhost:8080", data=request_data) + print_dict(response.text) + except Exception as e: + print(f"Exception {e}\n") + continue + finally: + requests_queue.task_done() + +# Define your test request data +requests_queue = queue.Queue() + +# number of threads +num_threads = 5 + +# some text data +country_list = ["France", "Germany", "China", "USA", "Italy", "India", + "Ukraine", "Japan", "Australia", "New Zealand", "Indonesia", "Nigeria", "Saudi Arabia", "Israel", "Egypt", "Kenya", "Chile", "Mexico", "Canada"] + +# Create producer and consumer threads +producer_thread = threading.Thread(target=producer, args = (country_list,)) +consumer_threads = [threading.Thread(target=consumer) for _ in range(num_threads)] # Adjust thread count + +# Start threads and monitor resources +producer_thread.start() +for thread in consumer_threads: + thread.start() + +producer_thread.join() +for thread in consumer_threads: + thread.join() + +print("Stress test completed!") diff --git a/StockMarketPred.py b/StockMarketPred.py new file mode 100644 index 000000000..126ed5e92 --- /dev/null +++ b/StockMarketPred.py @@ -0,0 +1,40 @@ +# stock market predictions + +import numpy as np +import pandas as pd +from sklearn import preprocessing +from sklearn.model_selection import train_test_split +from sklearn.linear_model import LinearRegression + +def prepare_data(df,forecast_col,forecast_out,test_size): + label = df[forecast_col].shift(-forecast_out) #creating new column called label with the last 5 rows are nan + X = np.array(df[[forecast_col]]) #creating the feature array + X = preprocessing.scale(X) #processing the feature array + X_lately = X[-forecast_out:] #creating the column i want to use later in the predicting method + X = X[:-forecast_out] # X that will contain the training and testing + label.dropna(inplace=True) #dropping na values + y = np.array(label) # assigning Y + X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=test_size, random_state=0) #cross validation + + response = [X_train,X_test , Y_train, Y_test , X_lately] + return response + +df = pd.read_csv("prices.csv") +df = df[df.symbol == "GOOG"] + +forecast_col = 'close' +forecast_out = 5 +test_size = 0.2 + +X_train, X_test, Y_train, Y_test , X_lately =prepare_data(df,forecast_col,forecast_out,test_size); #calling the method were the cross validation and data preperation is in +learner = LinearRegression() #initializing linear regression model + +learner.fit(X_train,Y_train) #training the linear regression model + +score=learner.score(X_test,Y_test)#testing the linear regression model +forecast= learner.predict(X_lately) #set that will contain the forecasted data +response={}#creting json object +response['test_score']=score +response['forecast_set']=forecast + +print(response) diff --git a/awq-py/README.md b/awq-py/README.md index 59354f4e3..16e68d027 100644 --- a/awq-py/README.md +++ b/awq-py/README.md @@ -43,7 +43,7 @@ Example for llama model # For llama7b and llama2 models python convert.py models/llama-7b/ --awq-path awq_cache/llama-7b-w4-g128.pt --outfile models/llama_7b_fp16.gguf # For mistral and mpt models -python convert-hf-to-gguf.py models/mpt-7b/ --awq-path awq_cache/llama-7b-w4-g128.pt --outfile models/mpt_7b_fp16.gguf +python convert-hf-to-gguf.py models/mpt-7b/ --awq-path awq_cache/mpt-7b-w4-g128.pt --outfile models/mpt_7b_fp16.gguf ``` ## Quantize diff --git a/ci/README.md b/ci/README.md index 65cfe63eb..406470519 100644 --- a/ci/README.md +++ b/ci/README.md @@ -22,4 +22,8 @@ bash ./ci/run.sh ./tmp/results ./tmp/mnt # with CUDA support GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt + +# with SYCL support +source /opt/intel/oneapi/setvars.sh +GG_BUILD_SYCL=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt ``` diff --git a/ci/run.sh b/ci/run.sh index 47a254f4c..979b4a793 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -10,6 +10,9 @@ # # with CUDA support # GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt # +# # with SYCL support +# GG_BUILD_SYCL=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt +# if [ -z "$2" ]; then echo "usage: $0 " @@ -22,9 +25,9 @@ mkdir -p "$2" OUT=$(realpath "$1") MNT=$(realpath "$2") -rm -v $OUT/*.log -rm -v $OUT/*.exit -rm -v $OUT/*.md +rm -f "$OUT/*.log" +rm -f "$OUT/*.exit" +rm -f "$OUT/*.md" sd=`dirname $0` cd $sd/../ @@ -36,6 +39,18 @@ if [ ! -z ${GG_BUILD_METAL} ]; then CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_METAL_SHADER_DEBUG=ON" fi +if [ ! -z ${GG_BUILD_CUDA} ]; then + CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_CUBLAS=1" +fi + +if [ ! -z ${GG_BUILD_SYCL} ]; then + if [ -z ${ONEAPI_ROOT} ]; then + echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:\n source /opt/intel/oneapi/setvars.sh" + exit 1 + fi + + CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_SYCL=1 DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON" +fi ## helpers # download a file if it does not exist or if it is outdated @@ -90,7 +105,7 @@ function gg_run_ctest_debug { (time cmake -DCMAKE_BUILD_TYPE=Debug ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log - (time ctest --output-on-failure -E test-opt ) 2>&1 | tee -a $OUT/${ci}-ctest.log + (time ctest --output-on-failure -L main -E test-opt ) 2>&1 | tee -a $OUT/${ci}-ctest.log set +e } @@ -119,9 +134,9 @@ function gg_run_ctest_release { (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log if [ -z ${GG_BUILD_LOW_PERF} ]; then - (time ctest --output-on-failure ) 2>&1 | tee -a $OUT/${ci}-ctest.log + (time ctest --output-on-failure -L main ) 2>&1 | tee -a $OUT/${ci}-ctest.log else - (time ctest --output-on-failure -E test-opt ) 2>&1 | tee -a $OUT/${ci}-ctest.log + (time ctest --output-on-failure -L main -E test-opt ) 2>&1 | tee -a $OUT/${ci}-ctest.log fi set +e @@ -137,6 +152,61 @@ function gg_sum_ctest_release { gg_printf '```\n' } +function gg_get_model { + local gguf_3b="$MNT/models/open-llama/3B-v2/ggml-model-f16.gguf" + local gguf_7b="$MNT/models/open-llama/7B-v2/ggml-model-f16.gguf" + if [[ -s $gguf_3b ]]; then + echo -n "$gguf_3b" + elif [[ -s $gguf_7b ]]; then + echo -n "$gguf_7b" + else + echo >&2 "No model found. Can't run gg_run_ctest_with_model." + exit 1 + fi +} + +function gg_run_ctest_with_model_debug { + cd ${SRC} + + local model; model=$(gg_get_model) + cd build-ci-debug + set -e + (LLAMACPP_TEST_MODELFILE="$model" time ctest --output-on-failure -L model) 2>&1 | tee -a $OUT/${ci}-ctest.log + set +e + cd .. +} + +function gg_run_ctest_with_model_release { + cd ${SRC} + + local model; model=$(gg_get_model) + cd build-ci-release + set -e + (LLAMACPP_TEST_MODELFILE="$model" time ctest --output-on-failure -L model) 2>&1 | tee -a $OUT/${ci}-ctest.log + set +e + cd .. +} + +function gg_sum_ctest_with_model_debug { + gg_printf '### %s\n\n' "${ci}" + + gg_printf 'Runs ctest with model files in debug mode\n' + gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" + gg_printf '```\n' + gg_printf '%s\n' "$(cat $OUT/${ci}-ctest.log)" + gg_printf '```\n' +} + +function gg_sum_ctest_with_model_release { + gg_printf '### %s\n\n' "${ci}" + + gg_printf 'Runs ctest with model files in release mode\n' + gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" + gg_printf '```\n' + gg_printf '%s\n' "$(cat $OUT/${ci}-ctest.log)" + gg_printf '```\n' +} + # open_llama_3b_v2 function gg_run_open_llama_3b_v2 { @@ -160,8 +230,8 @@ function gg_run_open_llama_3b_v2 { set -e - (time cmake -DCMAKE_BUILD_TYPE=Release -DLLAMA_QKK_64=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log - (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log + (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_QKK_64=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log + (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log python3 ../convert.py ${path_models} @@ -214,6 +284,8 @@ function gg_run_open_llama_3b_v2 { (time ./bin/perplexity --model ${model_q5_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log (time ./bin/perplexity --model ${model_q6_k} -f ${wiki_test_60} -c 128 -b 128 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log + (time ./bin/imatrix --model ${model_f16} -f ${wiki_test_60} -c 128 -b 128 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log + (time ./bin/save-load-state --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log function check_ppl { @@ -241,6 +313,8 @@ function gg_run_open_llama_3b_v2 { check_ppl "q5_k" "$(cat $OUT/${ci}-tg-q5_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log check_ppl "q6_k" "$(cat $OUT/${ci}-tg-q6_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log + cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log + # lora function compare_ppl { qnt="$1" @@ -282,7 +356,6 @@ function gg_run_open_llama_3b_v2 { (time ./bin/perplexity --model ${model_q8_0} -f ${shakespeare} --lora ${lora_shakespeare} --lora-base ${model_f16} -c 128 -b 128 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log compare_ppl "q8_0 / f16 base shakespeare" "$(cat $OUT/${ci}-ppl-shakespeare-q8_0.log | grep "^\[1\]")" "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-lora-ppl.log - set +e } @@ -292,6 +365,7 @@ function gg_sum_open_llama_3b_v2 { gg_printf 'OpenLLaMA 3B-v2:\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)" + gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)" gg_printf '- lora:\n%s\n' "$(cat $OUT/${ci}-lora-ppl.log)" gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)" gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)" @@ -337,8 +411,8 @@ function gg_run_open_llama_7b_v2 { set -e - (time cmake -DCMAKE_BUILD_TYPE=Release -DLLAMA_CUBLAS=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log - (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log + (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_CUBLAS=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log + (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log python3 ../convert.py ${path_models} @@ -391,6 +465,8 @@ function gg_run_open_llama_7b_v2 { (time ./bin/perplexity --model ${model_q5_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log (time ./bin/perplexity --model ${model_q6_k} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log + (time ./bin/imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 999 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log + (time ./bin/save-load-state --model ${model_q4_0} ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log function check_ppl { @@ -418,6 +494,8 @@ function gg_run_open_llama_7b_v2 { check_ppl "q5_k" "$(cat $OUT/${ci}-tg-q5_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log check_ppl "q6_k" "$(cat $OUT/${ci}-tg-q6_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log + cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log + # lora function compare_ppl { qnt="$1" @@ -469,6 +547,7 @@ function gg_sum_open_llama_7b_v2 { gg_printf 'OpenLLaMA 7B-v2:\n' gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" gg_printf '- perplexity:\n%s\n' "$(cat $OUT/${ci}-ppl.log)" + gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat $OUT/${ci}-imatrix-sum.log)" gg_printf '- lora:\n%s\n' "$(cat $OUT/${ci}-lora-ppl.log)" gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)" gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)" @@ -489,17 +568,69 @@ function gg_sum_open_llama_7b_v2 { #gg_printf '- shakespeare (q8_0 / f16 base lora):\n```\n%s\n```\n' "$(cat $OUT/${ci}-ppl-shakespeare-lora-q8_0-f16.log)" } +# bge-small + +function gg_run_embd_bge_small { + cd ${SRC} + + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/config.json + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/resolve/main/tokenizer.model + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/tokenizer_config.json + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/special_tokens_map.json + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/resolve/main/pytorch_model.bin + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/sentence_bert_config.json + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/vocab.txt + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/modules.json + gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/config.json + + gg_wget models-mnt/bge-small/1_Pooling https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/1_Pooling/config.json + + path_models="../models-mnt/bge-small" + + rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release + + set -e + + (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log + (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log + + python3 ../convert-hf-to-gguf.py ${path_models} + + model_f16="${path_models}/ggml-model-f16.gguf" + model_q8_0="${path_models}/ggml-model-q8_0.gguf" + + ./bin/quantize ${model_f16} ${model_q8_0} q8_0 + + (time ./bin/embedding --model ${model_f16} -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log + (time ./bin/embedding --model ${model_q8_0} -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log + + set +e +} + +function gg_sum_embd_bge_small { + gg_printf '### %s\n\n' "${ci}" + + gg_printf 'BGE Small (BERT):\n' + gg_printf '- status: %s\n' "$(cat $OUT/${ci}.exit)" + gg_printf '- f16: \n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-f16.log)" + gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat $OUT/${ci}-tg-q8_0.log)" +} + ## main if [ -z ${GG_BUILD_LOW_PERF} ]; then + # Create symlink: ./llama.cpp/models-mnt -> $MNT/models/models-mnt rm -rf ${SRC}/models-mnt - mnt_models=${MNT}/models mkdir -p ${mnt_models} ln -sfn ${mnt_models} ${SRC}/models-mnt - python3 -m pip install -r ${SRC}/requirements.txt - python3 -m pip install --editable gguf-py + # Create a fresh python3 venv and enter it + python3 -m venv "$MNT/venv" + source "$MNT/venv/bin/activate" + + pip install -r ${SRC}/requirements.txt --disable-pip-version-check + pip install --editable gguf-py --disable-pip-version-check fi ret=0 @@ -508,12 +639,16 @@ test $ret -eq 0 && gg_run ctest_debug test $ret -eq 0 && gg_run ctest_release if [ -z ${GG_BUILD_LOW_PERF} ]; then + test $ret -eq 0 && gg_run embd_bge_small + if [ -z ${GG_BUILD_VRAM_GB} ] || [ ${GG_BUILD_VRAM_GB} -ge 8 ]; then if [ -z ${GG_BUILD_CUDA} ]; then test $ret -eq 0 && gg_run open_llama_3b_v2 else test $ret -eq 0 && gg_run open_llama_7b_v2 fi + test $ret -eq 0 && gg_run ctest_with_model_debug + test $ret -eq 0 && gg_run ctest_with_model_release fi fi diff --git a/common/common.cpp b/common/common.cpp index b2cb0e257..3a92d3797 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -42,6 +42,14 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif +#if (defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)) +#define GGML_USE_CUBLAS_SYCL +#endif + +#if (defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)) || defined(GGML_USE_VULKAN) +#define GGML_USE_CUBLAS_SYCL_VULKAN +#endif + int32_t get_num_physical_cores() { #ifdef __linux__ // enumerate the set of thread siblings, num entries is num cores @@ -167,6 +175,24 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { if (params.n_threads_batch <= 0) { params.n_threads_batch = std::thread::hardware_concurrency(); } + } else if (arg == "-td" || arg == "--threads-draft") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.n_threads_draft = std::stoi(argv[i]); + if (params.n_threads_draft <= 0) { + params.n_threads_draft = std::thread::hardware_concurrency(); + } + } else if (arg == "-tbd" || arg == "--threads-batch-draft") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.n_threads_batch_draft = std::stoi(argv[i]); + if (params.n_threads_batch_draft <= 0) { + params.n_threads_batch_draft = std::thread::hardware_concurrency(); + } } else if (arg == "-p" || arg == "--prompt") { if (++i >= argc) { invalid_param = true; @@ -185,6 +211,23 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { params.prompt_cache_all = true; } else if (arg == "--prompt-cache-ro") { params.prompt_cache_ro = true; + } else if (arg == "-bf" || arg == "--binary-file") { + if (++i >= argc) { + invalid_param = true; + break; + } + std::ifstream file(argv[i], std::ios::binary); + if (!file) { + fprintf(stderr, "error: failed to open file '%s'\n", argv[i]); + invalid_param = true; + break; + } + // store the external file name in params + params.prompt_file = argv[i]; + std::ostringstream ss; + ss << file.rdbuf(); + params.prompt = ss.str(); + fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), argv[i]); } else if (arg == "-f" || arg == "--file") { if (++i >= argc) { invalid_param = true; @@ -297,13 +340,14 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { invalid_param = true; break; } - sparams.samplers_sequence = parse_samplers_input(argv[i]); + const auto sampler_names = string_split(argv[i], ';'); + sparams.samplers_sequence = sampler_types_from_names(sampler_names, true); } else if (arg == "--sampling-seq") { if (++i >= argc) { invalid_param = true; break; } - sparams.samplers_sequence = argv[i]; + sparams.samplers_sequence = sampler_types_from_chars(argv[i]); } else if (arg == "--top-p") { if (++i >= argc) { invalid_param = true; @@ -360,6 +404,18 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } sparams.penalty_present = std::stof(argv[i]); + } else if (arg == "--dynatemp-range") { + if (++i >= argc) { + invalid_param = true; + break; + } + sparams.dynatemp_range = std::stof(argv[i]); + } else if (arg == "--dynatemp-exp") { + if (++i >= argc) { + invalid_param = true; + break; + } + sparams.dynatemp_exponent = std::stof(argv[i]); } else if (arg == "--mirostat") { if (++i >= argc) { invalid_param = true; @@ -476,7 +532,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { invalid_param = true; break; } - params.lora_adapter.push_back(std::make_tuple(argv[i], 1.0f)); + params.lora_adapter.emplace_back(argv[i], 1.0f); params.use_mmap = false; } else if (arg == "--lora-scaled") { if (++i >= argc) { @@ -488,7 +544,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { invalid_param = true; break; } - params.lora_adapter.push_back(std::make_tuple(lora_adapter, std::stof(argv[i]))); + params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i])); params.use_mmap = false; } else if (arg == "--lora-base") { if (++i >= argc) { @@ -543,75 +599,97 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { invalid_param = true; break; } -#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD params.n_gpu_layers = std::stoi(argv[i]); -#else - fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n"); - fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); -#endif + if (!llama_supports_gpu_offload()) { + fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n"); + fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); + } } else if (arg == "--gpu-layers-draft" || arg == "-ngld" || arg == "--n-gpu-layers-draft") { if (++i >= argc) { invalid_param = true; break; } -#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD params.n_gpu_layers_draft = std::stoi(argv[i]); -#else - fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers-draft option will be ignored\n"); - fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); -#endif + if (!llama_supports_gpu_offload()) { + fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers-draft option will be ignored\n"); + fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); + } } else if (arg == "--main-gpu" || arg == "-mg") { if (++i >= argc) { invalid_param = true; break; } -#ifdef GGML_USE_CUBLAS params.main_gpu = std::stoi(argv[i]); -#else - fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n"); -#endif +#ifndef GGML_USE_CUBLAS_SYCL + fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS/SYCL. Setting the main GPU has no effect.\n"); +#endif // GGML_USE_CUBLAS_SYCL + } else if (arg == "--split-mode" || arg == "-sm") { + if (++i >= argc) { + invalid_param = true; + break; + } + std::string arg_next = argv[i]; + if (arg_next == "none") { + params.split_mode = LLAMA_SPLIT_NONE; + } else if (arg_next == "layer") { + params.split_mode = LLAMA_SPLIT_LAYER; + } else if (arg_next == "row") { + params.split_mode = LLAMA_SPLIT_ROW; + } else { + invalid_param = true; + break; + } +#ifndef GGML_USE_CUBLAS_SYCL + fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS/SYCL. Setting the split mode has no effect.\n"); +#endif // GGML_USE_CUBLAS_SYCL + } else if (arg == "--tensor-split" || arg == "-ts") { if (++i >= argc) { invalid_param = true; break; } -#ifdef GGML_USE_CUBLAS std::string arg_next = argv[i]; // split string by , and / const std::regex regex{R"([,/]+)"}; std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1}; std::vector split_arg{it, {}}; - GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES); - - for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) { + if (split_arg.size() >= llama_max_devices()) { + invalid_param = true; + break; + } + for (size_t i = 0; i < llama_max_devices(); ++i) { if (i < split_arg.size()) { params.tensor_split[i] = std::stof(split_arg[i]); } else { params.tensor_split[i] = 0.0f; } } -#else - fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n"); -#endif // GGML_USE_CUBLAS - } else if (arg == "--no-mul-mat-q" || arg == "-nommq") { -#ifdef GGML_USE_CUBLAS - params.mul_mat_q = false; -#else - fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n"); -#endif // GGML_USE_CUBLAS +#ifndef GGML_USE_CUBLAS_SYCL_VULKAN + fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS/SYCL/Vulkan. Setting a tensor split has no effect.\n"); +#endif // GGML_USE_CUBLAS_SYCL } else if (arg == "--no-mmap") { params.use_mmap = false; } else if (arg == "--numa") { - params.numa = true; + if (++i >= argc) { + invalid_param = true; + break; + } + std::string value(argv[i]); + /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; } + else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; } + else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; } + else { invalid_param = true; break; } } else if (arg == "--verbose-prompt") { params.verbose_prompt = true; + } else if (arg == "--no-display-prompt") { + params.display_prompt = false; } else if (arg == "-r" || arg == "--reverse-prompt") { if (++i >= argc) { invalid_param = true; break; } - params.antiprompt.push_back(argv[i]); + params.antiprompt.emplace_back(argv[i]); } else if (arg == "-ld" || arg == "--logdir") { if (++i >= argc) { invalid_param = true; @@ -622,6 +700,12 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { if (params.logdir.back() != DIRECTORY_SEPARATOR) { params.logdir += DIRECTORY_SEPARATOR; } + } else if (arg == "--save-all-logits" || arg == "--kl-divergence-base") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.logits_file = argv[i]; } else if (arg == "--perplexity" || arg == "--all-logits") { params.logits_all = true; } else if (arg == "--ppl-stride") { @@ -650,6 +734,24 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { break; } params.hellaswag_tasks = std::stoi(argv[i]); + } else if (arg == "--winogrande") { + params.winogrande = true; + } else if (arg == "--winogrande-tasks") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.winogrande_tasks = std::stoi(argv[i]); + } else if (arg == "--multiple-choice") { + params.multiple_choice = true; + } else if (arg == "--multiple-choice-tasks") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.multiple_choice_tasks = std::stoi(argv[i]); + } else if (arg == "--kl-divergence") { + params.kl_divergence = true; } else if (arg == "--ignore-eos") { params.ignore_eos = true; } else if (arg == "--no-penalize-nl") { @@ -803,7 +905,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { } if (!params.kv_overrides.empty()) { - params.kv_overrides.emplace_back(llama_model_kv_override()); + params.kv_overrides.emplace_back(); params.kv_overrides.back().key[0] = 0; } @@ -813,12 +915,20 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { const llama_sampling_params & sparams = params.sparams; + std::string sampler_type_chars; + std::string sampler_type_names; + for (const auto sampler_type : sparams.samplers_sequence) { + sampler_type_chars += static_cast(sampler_type); + sampler_type_names += sampler_type_to_name_string(sampler_type) + ";"; + } + sampler_type_names.pop_back(); + printf("\n"); printf("usage: %s [options]\n", argv[0]); printf("\n"); printf("options:\n"); printf(" -h, --help show this help message and exit\n"); - printf(" --version show version and build info\n"); + printf(" --version show version and build info\n"); printf(" -i, --interactive run in interactive mode\n"); printf(" --interactive-first run in interactive mode and wait for input right away\n"); printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n"); @@ -832,6 +942,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" -t N, --threads N number of threads to use during generation (default: %d)\n", params.n_threads); printf(" -tb N, --threads-batch N\n"); printf(" number of threads to use during batch and prompt processing (default: same as --threads)\n"); + printf(" -td N, --threads-draft N"); + printf(" number of threads to use during generation (default: same as --threads)\n"); + printf(" -tbd N, --threads-batch-draft N\n"); + printf(" number of threads to use during batch and prompt processing (default: same as --threads-draft)\n"); printf(" -p PROMPT, --prompt PROMPT\n"); printf(" prompt to start generation with (default: empty)\n"); printf(" -e, --escape process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n"); @@ -845,11 +959,14 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --in-suffix STRING string to suffix after user inputs with (default: empty)\n"); printf(" -f FNAME, --file FNAME\n"); printf(" prompt file to start generation.\n"); + printf(" -bf FNAME, --binary-file FNAME\n"); + printf(" binary file containing multiple choice tasks.\n"); printf(" -n N, --n-predict N number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)\n", params.n_predict); printf(" -c N, --ctx-size N size of the prompt context (default: %d, 0 = loaded from model)\n", params.n_ctx); printf(" -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); - printf(" --samplers samplers that will be used for generation in the order, separated by \';\', for example: \"top_k;tfs;typical;top_p;min_p;temp\"\n"); - printf(" --sampling-seq simplified sequence for samplers that will be used (default: %s)\n", sparams.samplers_sequence.c_str()); + printf(" --samplers samplers that will be used for generation in the order, separated by \';\'\n"); + printf(" (default: %s)\n", sampler_type_names.c_str()); + printf(" --sampling-seq simplified sequence for samplers that will be used (default: %s)\n", sampler_type_chars.c_str()); printf(" --top-k N top-k sampling (default: %d, 0 = disabled)\n", sparams.top_k); printf(" --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)sparams.top_p); printf(" --min-p N min-p sampling (default: %.1f, 0.0 = disabled)\n", (double)sparams.min_p); @@ -859,6 +976,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --repeat-penalty N penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)sparams.penalty_repeat); printf(" --presence-penalty N repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)sparams.penalty_present); printf(" --frequency-penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)sparams.penalty_freq); + printf(" --dynatemp-range N dynamic temperature range (default: %.1f, 0.0 = disabled)\n", (double)sparams.dynatemp_range); + printf(" --dynatemp-exp N dynamic temperature exponent (default: %.1f)\n", (double)sparams.dynatemp_exponent); printf(" --mirostat N use Mirostat sampling.\n"); printf(" Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n"); printf(" (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", sparams.mirostat); @@ -891,6 +1010,11 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --logits-all return logits for all tokens in the batch (default: disabled)\n"); printf(" --hellaswag compute HellaSwag score over random tasks from datafile supplied with -f\n"); printf(" --hellaswag-tasks N number of tasks to use when computing the HellaSwag score (default: %zu)\n", params.hellaswag_tasks); + printf(" --winogrande compute Winogrande score over random tasks from datafile supplied with -f\n"); + printf(" --winogrande-tasks N number of tasks to use when computing the Winogrande score (default: %zu)\n", params.winogrande_tasks); + printf(" --multiple-choice compute multiple choice score over random tasks from datafile supplied with -f\n"); + printf(" --multiple-choice-tasks N number of tasks to use when computing the multiple choice score (default: %zu)\n", params.winogrande_tasks); + printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base\n"); printf(" --keep N number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep); printf(" --draft N number of tokens to draft for speculative decoding (default: %d)\n", params.n_draft); printf(" --chunks N max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks); @@ -901,34 +1025,39 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n"); printf(" --mmproj MMPROJ_FILE path to a multimodal projector file for LLaVA. see examples/llava/README.md\n"); printf(" --image IMAGE_FILE path to an image file. use with multimodal models\n"); - if (llama_mlock_supported()) { + if (llama_supports_mlock()) { printf(" --mlock force system to keep model in RAM rather than swapping or compressing\n"); } - if (llama_mmap_supported()) { + if (llama_supports_mmap()) { printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } - printf(" --numa attempt optimizations that help on some NUMA systems\n"); + printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n"); + printf(" - distribute: spread execution evenly over all nodes\n"); + printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n"); + printf(" - numactl: use the CPU map provided by numactl\n"); printf(" if run without this previously, it is recommended to drop the system page cache before using this\n"); printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n"); -#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD - printf(" -ngl N, --n-gpu-layers N\n"); - printf(" number of layers to store in VRAM\n"); - printf(" -ngld N, --n-gpu-layers-draft N\n"); - printf(" number of layers to store in VRAM for the draft model\n"); - printf(" -ts SPLIT --tensor-split SPLIT\n"); - printf(" how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n"); - printf(" -mg i, --main-gpu i the GPU to use for scratch and small tensors\n"); -#ifdef GGML_USE_CUBLAS - printf(" -nommq, --no-mul-mat-q\n"); - printf(" use " GGML_CUBLAS_NAME " instead of custom mul_mat_q " GGML_CUDA_NAME " kernels.\n"); - printf(" Not recommended since this is both slower and uses more VRAM.\n"); -#endif // GGML_USE_CUBLAS -#endif + if (llama_supports_gpu_offload()) { + printf(" -ngl N, --n-gpu-layers N\n"); + printf(" number of layers to store in VRAM\n"); + printf(" -ngld N, --n-gpu-layers-draft N\n"); + printf(" number of layers to store in VRAM for the draft model\n"); + printf(" -sm SPLIT_MODE, --split-mode SPLIT_MODE\n"); + printf(" how to split the model across multiple GPUs, one of:\n"); + printf(" - none: use one GPU only\n"); + printf(" - layer (default): split layers and KV across GPUs\n"); + printf(" - row: split rows across GPUs\n"); + printf(" -ts SPLIT, --tensor-split SPLIT\n"); + printf(" fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n"); + printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n"); + printf(" or for intermediate results and KV (with split-mode = row) (default: %d)\n", params.main_gpu); + } + printf(" --verbose-prompt print a verbose prompt before generation (default: %s)\n", params.verbose_prompt ? "true" : "false"); + printf(" --no-display-prompt don't print prompt at generation (default: %s)\n", !params.display_prompt ? "true" : "false"); printf(" -gan N, --grp-attn-n N\n"); printf(" group-attention factor (default: %d)\n", params.grp_attn_n); printf(" -gaw N, --grp-attn-w N\n"); printf(" group-attention width (default: %.1f)\n", (double)params.grp_attn_w); - printf(" --verbose-prompt print prompt before generation\n"); printf(" -dkvc, --dump-kv-cache\n"); printf(" verbose print of the KV cache\n"); printf(" -nkvo, --no-kv-offload\n"); @@ -950,7 +1079,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" --override-kv KEY=TYPE:VALUE\n"); printf(" advanced option to override model metadata by key. may be specified multiple times.\n"); printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n"); - printf(" -stc N --print-token-count N\n"); + printf(" -ptc N, --print-token-count N\n"); printf(" print token count every N tokens (default: %d)\n", params.n_print); printf("\n"); #ifndef LOG_DISABLE_LOGS @@ -989,45 +1118,101 @@ std::string gpt_random_prompt(std::mt19937 & rng) { } // -// String parsing +// String utils // -std::string parse_samplers_input(std::string input) { - std::string output = ""; +std::vector string_split(std::string input, char separator) { + std::vector parts; + size_t separator_pos = input.find(separator); + while (separator_pos != std::string::npos) { + std::string part = input.substr(0, separator_pos); + parts.emplace_back(part); + input = input.substr(separator_pos + 1); + separator_pos = input.find(separator); + } + parts.emplace_back(input); + return parts; +} + +std::vector sampler_types_from_names(const std::vector & names, bool allow_alt_names) { + std::unordered_map sampler_canonical_name_map { + {"top_k", llama_sampler_type::TOP_K}, + {"top_p", llama_sampler_type::TOP_P}, + {"typical_p", llama_sampler_type::TYPICAL_P}, + {"min_p", llama_sampler_type::MIN_P}, + {"tfs_z", llama_sampler_type::TFS_Z}, + {"temperature", llama_sampler_type::TEMPERATURE} + }; + // since samplers names are written multiple ways // make it ready for both system names and input names - std::unordered_map samplers_symbols { - {"top_k", 'k'}, - {"top-k", 'k'}, - {"top_p", 'p'}, - {"top-p", 'p'}, - {"nucleus", 'p'}, - {"typical_p", 'y'}, - {"typical-p", 'y'}, - {"typical", 'y'}, - {"min_p", 'm'}, - {"min-p", 'm'}, - {"tfs_z", 'f'}, - {"tfs-z", 'f'}, - {"tfs", 'f'}, - {"temp", 't'}, - {"temperature",'t'} + std::unordered_map sampler_alt_name_map { + {"top-k", llama_sampler_type::TOP_K}, + {"top-p", llama_sampler_type::TOP_P}, + {"nucleus", llama_sampler_type::TOP_P}, + {"typical-p", llama_sampler_type::TYPICAL_P}, + {"typical", llama_sampler_type::TYPICAL_P}, + {"min-p", llama_sampler_type::MIN_P}, + {"tfs-z", llama_sampler_type::TFS_Z}, + {"tfs", llama_sampler_type::TFS_Z}, + {"temp", llama_sampler_type::TEMPERATURE} }; - // expected format example: "temp;top_k;tfs_z;typical_p;top_p;min_p" - size_t separator = input.find(';'); - while (separator != input.npos) { - std::string name = input.substr(0,separator); - input = input.substr(separator+1); - separator = input.find(';'); - if (samplers_symbols.find(name) != samplers_symbols.end()) { - output += samplers_symbols[name]; + std::vector sampler_types; + sampler_types.reserve(names.size()); + for (const auto & name : names) + { + auto sampler_item = sampler_canonical_name_map.find(name); + if (sampler_item != sampler_canonical_name_map.end()) + { + sampler_types.push_back(sampler_item->second); + } + else + { + if (allow_alt_names) + { + sampler_item = sampler_alt_name_map.find(name); + if (sampler_item != sampler_alt_name_map.end()) + { + sampler_types.push_back(sampler_item->second); + } + } } } - if (samplers_symbols.find(input) != samplers_symbols.end()) { - output += samplers_symbols[input]; + return sampler_types; +} + +std::vector sampler_types_from_chars(const std::string & names_string) { + std::unordered_map sampler_name_map { + {'k', llama_sampler_type::TOP_K}, + {'p', llama_sampler_type::TOP_P}, + {'y', llama_sampler_type::TYPICAL_P}, + {'m', llama_sampler_type::MIN_P}, + {'f', llama_sampler_type::TFS_Z}, + {'t', llama_sampler_type::TEMPERATURE} + }; + + std::vector sampler_types; + sampler_types.reserve(names_string.size()); + for (const auto & c : names_string) { + const auto sampler_item = sampler_name_map.find(c); + if (sampler_item != sampler_name_map.end()) { + sampler_types.push_back(sampler_item->second); + } + } + return sampler_types; +} + +std::string sampler_type_to_name_string(llama_sampler_type sampler_type) { + switch (sampler_type) { + case llama_sampler_type::TOP_K: return "top_k"; + case llama_sampler_type::TFS_Z: return "tfs_z"; + case llama_sampler_type::TYPICAL_P: return "typical_p"; + case llama_sampler_type::TOP_P: return "top_p"; + case llama_sampler_type::MIN_P: return "min_p"; + case llama_sampler_type::TEMPERATURE: return "temperature"; + default : return ""; } - return output; } // @@ -1041,6 +1226,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & mparams.n_gpu_layers = params.n_gpu_layers; } mparams.main_gpu = params.main_gpu; + mparams.split_mode = params.split_mode; mparams.tensor_split = params.tensor_split; mparams.use_mmap = params.use_mmap; mparams.use_mlock = params.use_mlock; @@ -1055,6 +1241,9 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & } static ggml_type kv_cache_type_from_str(const std::string & s) { + if (s == "f32") { + return GGML_TYPE_F32; + } if (s == "f16") { return GGML_TYPE_F16; } @@ -1425,9 +1614,10 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "cpu_has_avx512: %s\n", ggml_cpu_has_avx512() ? "true" : "false"); fprintf(stream, "cpu_has_avx512_vbmi: %s\n", ggml_cpu_has_avx512_vbmi() ? "true" : "false"); fprintf(stream, "cpu_has_avx512_vnni: %s\n", ggml_cpu_has_avx512_vnni() ? "true" : "false"); - fprintf(stream, "cpu_has_blas: %s\n", ggml_cpu_has_blas() ? "true" : "false"); fprintf(stream, "cpu_has_cublas: %s\n", ggml_cpu_has_cublas() ? "true" : "false"); + fprintf(stream, "cpu_has_vulkan: %s\n", ggml_cpu_has_vulkan() ? "true" : "false"); fprintf(stream, "cpu_has_clblast: %s\n", ggml_cpu_has_clblast() ? "true" : "false"); + fprintf(stream, "cpu_has_kompute: %s\n", ggml_cpu_has_kompute() ? "true" : "false"); fprintf(stream, "cpu_has_fma: %s\n", ggml_cpu_has_fma() ? "true" : "false"); fprintf(stream, "cpu_has_gpublas: %s\n", ggml_cpu_has_gpublas() ? "true" : "false"); fprintf(stream, "cpu_has_neon: %s\n", ggml_cpu_has_neon() ? "true" : "false"); @@ -1437,6 +1627,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "cpu_has_blas: %s\n", ggml_cpu_has_blas() ? "true" : "false"); fprintf(stream, "cpu_has_sse3: %s\n", ggml_cpu_has_sse3() ? "true" : "false"); fprintf(stream, "cpu_has_vsx: %s\n", ggml_cpu_has_vsx() ? "true" : "false"); + fprintf(stream, "cpu_has_matmul_int8: %s\n", ggml_cpu_has_matmul_int8() ? "true" : "false"); #ifdef NDEBUG fprintf(stream, "debug: false\n"); @@ -1526,7 +1717,6 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false"); fprintf(stream, "no_mul_mat_q: %s # default: false\n", !params.mul_mat_q ? "true" : "false"); fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false"); - fprintf(stream, "numa: %s # default: false\n", params.numa ? "true" : "false"); fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type); fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride); fprintf(stream, "presence_penalty: %f # default: 0.0\n", sparams.penalty_present); @@ -1556,7 +1746,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "cont_batching: %s # default: false\n", params.cont_batching ? "true" : "false"); fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp); - const std::vector tensor_split_vector(params.tensor_split, params.tensor_split + LLAMA_MAX_DEVICES); + const std::vector tensor_split_vector(params.tensor_split, params.tensor_split + llama_max_devices()); dump_vector_float_yaml(stream, "tensor_split", tensor_split_vector); fprintf(stream, "tfs: %f # default: 1.0\n", sparams.tfs_z); @@ -1566,6 +1756,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l fprintf(stream, "min_p: %f # default: 0.0\n", sparams.min_p); fprintf(stream, "typical_p: %f # default: 1.0\n", sparams.typical_p); fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false"); + fprintf(stream, "display_prompt: %s # default: true\n", params.display_prompt ? "true" : "false"); } // diff --git a/common/common.h b/common/common.h index 1359e76ab..935771d44 100644 --- a/common/common.h +++ b/common/common.h @@ -43,37 +43,40 @@ extern char const *LLAMA_BUILD_TARGET; int32_t get_num_physical_cores(); struct gpt_params { - uint32_t seed = -1; // RNG seed + uint32_t seed = -1; // RNG seed - int32_t n_threads = get_num_physical_cores(); - int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads) - int32_t n_predict = -1; // new tokens to predict - int32_t n_ctx = 512; // context size - int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) - int32_t n_keep = 0; // number of tokens to keep from initial prompt - int32_t n_draft = 8; // number of tokens to draft during speculative decoding - int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited) - int32_t n_parallel = 1; // number of parallel sequences to decode - int32_t n_sequences = 1; // number of sequences to decode - float p_accept = 0.5f; // speculative decoding accept probability - float p_split = 0.1f; // speculative decoding split probability - int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) - int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default) - int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors - float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs - int32_t n_beams = 0; // if non-zero then use beam search of given width. - int32_t grp_attn_n = 1; // group-attention factor - int32_t grp_attn_w = 512; // group-attention width - int32_t n_print = -1; // print token count every n tokens (-1 = disabled) - float rope_freq_base = 0.0f; // RoPE base frequency - float rope_freq_scale = 0.0f; // RoPE frequency scaling factor - float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor - float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor - float yarn_beta_fast = 32.0f; // YaRN low correction dim - float yarn_beta_slow = 1.0f; // YaRN high correction dim - int32_t yarn_orig_ctx = 0; // YaRN original context length - int8_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; // TODO: better to be int32_t for alignment - // pinging @cebtenzzre + int32_t n_threads = get_num_physical_cores(); + int32_t n_threads_draft = -1; + int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads) + int32_t n_threads_batch_draft = -1; + int32_t n_predict = -1; // new tokens to predict + int32_t n_ctx = 512; // context size + int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) + int32_t n_keep = 0; // number of tokens to keep from initial prompt + int32_t n_draft = 8; // number of tokens to draft during speculative decoding + int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited) + int32_t n_parallel = 1; // number of parallel sequences to decode + int32_t n_sequences = 1; // number of sequences to decode + float p_accept = 0.5f; // speculative decoding accept probability + float p_split = 0.1f; // speculative decoding split probability + int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) + int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default) + llama_split_mode split_mode = LLAMA_SPLIT_LAYER; // how to split the model across GPUs + int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors + float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs + int32_t n_beams = 0; // if non-zero then use beam search of given width. + int32_t grp_attn_n = 1; // group-attention factor + int32_t grp_attn_w = 512; // group-attention width + int32_t n_print = -1; // print token count every n tokens (-1 = disabled) + float rope_freq_base = 0.0f; // RoPE base frequency + float rope_freq_scale = 0.0f; // RoPE frequency scaling factor + float yarn_ext_factor = -1.0f; // YaRN extrapolation mix factor + float yarn_attn_factor = 1.0f; // YaRN magnitude scaling factor + float yarn_beta_fast = 32.0f; // YaRN low correction dim + float yarn_beta_slow = 1.0f; // YaRN high correction dim + int32_t yarn_orig_ctx = 0; // YaRN original context length + int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED; + ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED; // // sampling parameters struct llama_sampling_params sparams; @@ -88,6 +91,7 @@ struct gpt_params { std::string input_suffix = ""; // string to suffix user inputs with std::vector antiprompt; // string upon seeing which more user input is prompted std::string logdir = ""; // directory in which to save YAML log files + std::string logits_file = ""; // file for saving *all* logits std::vector kv_overrides; @@ -102,6 +106,14 @@ struct gpt_params { bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score + bool winogrande = false; // compute Winogrande score over random tasks from datafile supplied in prompt + size_t winogrande_tasks= 0; // number of tasks to use when computing the Winogrande score. If 0, all tasks will be computed + + bool multiple_choice = false; // compute TruthfulQA score over random tasks from datafile supplied in prompt + size_t multiple_choice_tasks = 0; // number of tasks to use when computing the TruthfulQA score. If 0, all tasks will be computed + + bool kl_divergence = false; // compute KL-divergence + bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS bool random_prompt = false; // do not randomize prompt if none provided bool use_color = false; // use color to distinguish generations and inputs @@ -123,8 +135,8 @@ struct gpt_params { bool logits_all = false; // return logits for all tokens in the batch bool use_mmap = true; // use mmap for faster loads bool use_mlock = false; // use mlock to keep model in memory - bool numa = false; // attempt optimizations that help on some NUMA systems bool verbose_prompt = false; // print prompt tokens before generation + bool display_prompt = true; // print prompt before generation bool infill = false; // use infill mode bool dump_kv_cache = false; // dump the KV cache contents for debugging purposes bool no_kv_offload = false; // disable KV offloading @@ -150,10 +162,13 @@ std::string gpt_random_prompt(std::mt19937 & rng); void process_escapes(std::string& input); // -// String parsing +// String utils // -std::string parse_samplers_input(std::string input); +std::vector sampler_types_from_names(const std::vector & names, bool allow_alt_names); +std::vector sampler_types_from_chars(const std::string & names_string); +std::vector string_split(std::string input, char separator); +std::string sampler_type_to_name_string(llama_sampler_type sampler_type); // // Model utils diff --git a/common/sampling.cpp b/common/sampling.cpp index 8e45909f1..53013138a 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -13,6 +13,7 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_ // will be empty (default) if there are parse errors if (result->parsed_grammar.rules.empty()) { fprintf(stderr, "%s: failed to parse grammar\n", __func__); + delete result; return nullptr; } @@ -102,15 +103,10 @@ std::string llama_sampling_print(const llama_sampling_params & params) { std::string llama_sampling_order_print(const llama_sampling_params & params) { std::string result = "CFG -> Penalties "; if (params.mirostat == 0) { - for (auto s : params.samplers_sequence) { - switch (s) { - case 'k': result += "-> top_k "; break; - case 'f': result += "-> tfs_z "; break; - case 'y': result += "-> typical_p "; break; - case 'p': result += "-> top_p "; break; - case 'm': result += "-> min_p "; break; - case 't': result += "-> temp "; break; - default : break; + for (auto sampler_type : params.samplers_sequence) { + const auto sampler_type_name = sampler_type_to_name_string(sampler_type); + if (!sampler_type_name.empty()) { + result += "-> " + sampler_type_name + " "; } } } else { @@ -126,24 +122,32 @@ static void sampler_queue( const llama_sampling_params & params, llama_token_data_array & cur_p, size_t & min_keep) { - const int n_vocab = llama_n_vocab(llama_get_model(ctx_main)); - const float temp = params.temp; - const int32_t top_k = params.top_k <= 0 ? n_vocab : params.top_k; + const float dynatemp_range = params.dynatemp_range; + const float dynatemp_exponent = params.dynatemp_exponent; + const int32_t top_k = params.top_k; const float top_p = params.top_p; const float min_p = params.min_p; const float tfs_z = params.tfs_z; const float typical_p = params.typical_p; - const std::string & samplers_sequence = params.samplers_sequence; + const std::vector & samplers_sequence = params.samplers_sequence; - for (auto s : samplers_sequence) { - switch (s){ - case 'k': llama_sample_top_k (ctx_main, &cur_p, top_k, min_keep); break; - case 'f': llama_sample_tail_free(ctx_main, &cur_p, tfs_z, min_keep); break; - case 'y': llama_sample_typical (ctx_main, &cur_p, typical_p, min_keep); break; - case 'p': llama_sample_top_p (ctx_main, &cur_p, top_p, min_keep); break; - case 'm': llama_sample_min_p (ctx_main, &cur_p, min_p, min_keep); break; - case 't': llama_sample_temp (ctx_main, &cur_p, temp); break; + for (auto sampler_type : samplers_sequence) { + switch (sampler_type) { + case llama_sampler_type::TOP_K : llama_sample_top_k (ctx_main, &cur_p, top_k, min_keep); break; + case llama_sampler_type::TFS_Z : llama_sample_tail_free(ctx_main, &cur_p, tfs_z, min_keep); break; + case llama_sampler_type::TYPICAL_P: llama_sample_typical (ctx_main, &cur_p, typical_p, min_keep); break; + case llama_sampler_type::TOP_P : llama_sample_top_p (ctx_main, &cur_p, top_p, min_keep); break; + case llama_sampler_type::MIN_P : llama_sample_min_p (ctx_main, &cur_p, min_p, min_keep); break; + case llama_sampler_type::TEMPERATURE: + if (dynatemp_range > 0) { + float dynatemp_min = std::max(0.0f, temp - dynatemp_range); + float dynatemp_max = std::max(0.0f, temp + dynatemp_range); + llama_sample_entropy(ctx_main, &cur_p, dynatemp_min, dynatemp_max, dynatemp_exponent); + } else { + llama_sample_temp(ctx_main, &cur_p, temp); + } + break; default : break; } } @@ -190,6 +194,11 @@ static llama_token llama_sampling_sample_impl( logits[it->first] += it->second; } + if (ctx_cfg) { + float * logits_guidance = llama_get_logits_ith(ctx_cfg, idx); + llama_sample_apply_guidance(ctx_main, logits, logits_guidance, params.cfg_scale); + } + cur.clear(); for (llama_token token_id = 0; token_id < n_vocab; token_id++) { @@ -198,10 +207,6 @@ static llama_token llama_sampling_sample_impl( llama_token_data_array cur_p = { cur.data(), cur.size(), false }; - if (ctx_cfg) { - llama_sample_classifier_free_guidance(ctx_main, &cur_p, ctx_cfg, params.cfg_scale); - } - // apply penalties const auto& penalty_tokens = params.use_penalty_prompt_tokens ? params.penalty_prompt_tokens : prev; const int penalty_tokens_used_size = std::min((int)penalty_tokens.size(), penalty_last_n); diff --git a/common/sampling.h b/common/sampling.h index f16ef97e3..e1279a894 100644 --- a/common/sampling.h +++ b/common/sampling.h @@ -8,6 +8,16 @@ #include #include +// sampler types +enum class llama_sampler_type : char { + TOP_K = 'k', + TOP_P = 'p', + MIN_P = 'm', + TFS_Z = 'f', + TYPICAL_P = 'y', + TEMPERATURE = 't' +}; + // sampling parameters typedef struct llama_sampling_params { int32_t n_prev = 64; // number of previous tokens to remember @@ -17,7 +27,9 @@ typedef struct llama_sampling_params { float min_p = 0.05f; // 0.0 = disabled float tfs_z = 1.00f; // 1.0 = disabled float typical_p = 1.00f; // 1.0 = disabled - float temp = 0.80f; // 1.0 = disabled + float temp = 0.80f; // <= 0.0 to sample greedily, 0.0 to not output probabilities + float dynatemp_range = 0.00f; // 0.0 = disabled + float dynatemp_exponent = 1.00f; // controls how entropy maps to temperature in dynamic temperature sampler int32_t penalty_last_n = 64; // last n tokens to penalize (0 = disable penalty, -1 = context size) float penalty_repeat = 1.10f; // 1.0 = disabled float penalty_freq = 0.00f; // 0.0 = disabled @@ -26,7 +38,15 @@ typedef struct llama_sampling_params { float mirostat_tau = 5.00f; // target entropy float mirostat_eta = 0.10f; // learning rate bool penalize_nl = true; // consider newlines as a repeatable token - std::string samplers_sequence = "kfypmt"; // top_k, tail_free, typical_p, top_p, min_p, temp + + std::vector samplers_sequence = { + llama_sampler_type::TOP_K, + llama_sampler_type::TFS_Z, + llama_sampler_type::TYPICAL_P, + llama_sampler_type::TOP_P, + llama_sampler_type::MIN_P, + llama_sampler_type::TEMPERATURE + }; std::string grammar; // optional BNF-like grammar to constrain sampling diff --git a/common/train.cpp b/common/train.cpp index e6f2f7a2f..e4c3d5df6 100644 --- a/common/train.cpp +++ b/common/train.cpp @@ -1363,12 +1363,12 @@ bool consume_common_train_arg( *invalid_param = true; return true; } -#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD - params->n_gpu_layers = std::stoi(argv[i]); -#else - fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n"); - fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); -#endif + if (llama_supports_gpu_offload()) { + params->n_gpu_layers = std::stoi(argv[i]); + } else { + fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n"); + fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); + } } else if (arg == "-h" || arg == "--help") { params->print_usage = true; return true; diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 203eaf64b..9771fccf9 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -10,7 +10,7 @@ import re import sys from enum import IntEnum from pathlib import Path -from typing import TYPE_CHECKING, Any, ContextManager, Iterator, cast, Optional +from typing import TYPE_CHECKING, Any, ContextManager, Iterator, Sequence, cast import numpy as np import torch @@ -22,6 +22,8 @@ if 'NO_LOCAL_GGUF' not in os.environ: sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf +from convert import HfVocab + ###### MODEL DEFINITIONS ###### @@ -47,6 +49,15 @@ class Model: self.hparams = Model.load_hparams(self.dir_model) self.model_arch = self._get_model_architecture() self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=False) + self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"]) + + def find_hparam(self, keys: Sequence[str], optional: bool = False) -> Any: + key = next((k for k in keys if k in self.hparams), None) + if key is not None: + return self.hparams[key] + if optional: + return None + raise KeyError(f"could not find any of: {keys}") def set_vocab(self): self._set_vocab_gpt2() @@ -68,28 +79,33 @@ class Model: def set_gguf_parameters(self): self.gguf_writer.add_name(self.dir_model.name) - self.gguf_writer.add_block_count(self.hparams.get( - "n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")), - )) - if (n_ctx := self.hparams.get("max_position_embeddings")) is not None: + self.gguf_writer.add_block_count(self.block_count) + + if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None: self.gguf_writer.add_context_length(n_ctx) - if (n_embd := self.hparams.get("hidden_size")) is not None: - self.gguf_writer.add_embedding_length(n_embd) - if (n_ff := self.hparams.get("intermediate_size")) is not None: + + n_embd = self.find_hparam(["hidden_size", "n_embd"]) + self.gguf_writer.add_embedding_length(n_embd) + + if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None: self.gguf_writer.add_feed_forward_length(n_ff) - if (n_head := self.hparams.get("num_attention_heads")) is not None: - self.gguf_writer.add_head_count(n_head) + + n_head = self.find_hparam(["num_attention_heads", "n_head"]) + self.gguf_writer.add_head_count(n_head) + if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None: self.gguf_writer.add_head_count_kv(n_head_kv) - if (n_rms_eps := self.hparams.get("rms_norm_eps")) is not None: - self.gguf_writer.add_layer_norm_rms_eps(n_rms_eps) + if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None: + self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps) + if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon"], optional=True)) is not None: + self.gguf_writer.add_layer_norm_eps(f_norm_eps) if (n_experts := self.hparams.get("num_local_experts")) is not None: self.gguf_writer.add_expert_count(n_experts) if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None: self.gguf_writer.add_expert_used_count(n_experts_used) - self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True)) + self.gguf_writer.add_file_type(self.ftype) def write_tensors(self): block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) @@ -180,6 +196,8 @@ class Model: return StableLMModel if model_architecture == "QWenLMHeadModel": return QwenModel + if model_architecture == "Qwen2ForCausalLM": + return Model if model_architecture == "MixtralForCausalLM": return MixtralModel if model_architecture == "GPT2LMHeadModel": @@ -188,6 +206,18 @@ class Model: return Phi2Model if model_architecture == "PlamoForCausalLM": return PlamoModel + if model_architecture == "CodeShellForCausalLM": + return CodeShellModel + if model_architecture == "OrionForCausalLM": + return OrionModel + if model_architecture == "InternLM2ForCausalLM": + return InternLM2Model + if model_architecture == "MiniCPMForCausalLM": + return MiniCPMModel + if model_architecture == "BertModel": + return BertModel + if model_architecture == "NomicBertModel": + return NomicBertModel return Model def _is_model_safetensors(self) -> bool: @@ -225,6 +255,8 @@ class Model: return gguf.MODEL_ARCH.STABLELM if arch == "QWenLMHeadModel": return gguf.MODEL_ARCH.QWEN + if arch == "Qwen2ForCausalLM": + return gguf.MODEL_ARCH.QWEN2 if arch == "MixtralForCausalLM": return gguf.MODEL_ARCH.LLAMA if arch == "GPT2LMHeadModel": @@ -233,6 +265,18 @@ class Model: return gguf.MODEL_ARCH.PHI2 if arch == "PlamoForCausalLM": return gguf.MODEL_ARCH.PLAMO + if arch == "CodeShellForCausalLM": + return gguf.MODEL_ARCH.CODESHELL + if arch == "OrionForCausalLM": + return gguf.MODEL_ARCH.ORION + if arch == "InternLM2ForCausalLM": + return gguf.MODEL_ARCH.INTERNLM2 + if arch == "MiniCPMForCausalLM": + return gguf.MODEL_ARCH.MINICPM + if arch == "BertModel": + return gguf.MODEL_ARCH.BERT + if arch == "NomicBertModel": + return gguf.MODEL_ARCH.NOMIC_BERT raise NotImplementedError(f'Architecture "{arch}" not supported!') @@ -272,6 +316,58 @@ class Model: special_vocab = gguf.SpecialVocab(dir_model, load_merges=True) special_vocab.add_to_gguf(self.gguf_writer) + def _set_vocab_qwen(self): + dir_model = self.dir_model + hparams = self.hparams + tokens: list[bytearray] = [] + toktypes: list[int] = [] + + from transformers import AutoTokenizer + tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) + vocab_size = hparams["vocab_size"] + assert max(tokenizer.get_vocab().values()) < vocab_size + + merges = [] + vocab = {} + mergeable_ranks = tokenizer.mergeable_ranks + for token, rank in mergeable_ranks.items(): + vocab[QwenModel.token_bytes_to_string(token)] = rank + if len(token) == 1: + continue + merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank) + assert len(merged) == 2 + merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged))) + + # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined + added_vocab = tokenizer.special_tokens + reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in (vocab | added_vocab).items()} + + for i in range(vocab_size): + if i not in reverse_vocab: + pad_token = f"[PAD{i}]".encode("utf-8") + tokens.append(bytearray(pad_token)) + toktypes.append(gguf.TokenType.USER_DEFINED) + elif reverse_vocab[i] in added_vocab: + tokens.append(reverse_vocab[i]) + toktypes.append(gguf.TokenType.CONTROL) + else: + tokens.append(reverse_vocab[i]) + toktypes.append(gguf.TokenType.NORMAL) + + self.gguf_writer.add_tokenizer_model("gpt2") + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_types(toktypes) + + special_vocab = gguf.SpecialVocab(dir_model, load_merges=False) + special_vocab.merges = merges + # only add special tokens when they were not already loaded from config.json + if len(special_vocab.special_token_ids) == 0: + special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"]) + special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"]) + # this one is usually not in config.json anyway + special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"]) + special_vocab.add_to_gguf(self.gguf_writer) + def _set_vocab_sentencepiece(self): from sentencepiece import SentencePieceProcessor @@ -325,6 +421,31 @@ class Model: special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) special_vocab.add_to_gguf(self.gguf_writer) + def _set_vocab_hf(self): + path = self.dir_model + added_tokens_path = self.dir_model + vocab = HfVocab( + path, added_tokens_path if added_tokens_path.exists() else None + ) + tokens = [] + scores = [] + toktypes = [] + + for text, score, toktype in vocab.all_tokens(): + tokens.append(text) + scores.append(score) + toktypes.append(toktype) + + assert len(tokens) == vocab.vocab_size + + self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_scores(scores) + self.gguf_writer.add_token_types(toktypes) + + special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) + special_vocab.add_to_gguf(self.gguf_writer) + class GPTNeoXModel(Model): def set_gguf_parameters(self): @@ -470,7 +591,8 @@ class MPTModel(Model): # map tensor names if "scales" in name: new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias", ".scales")) - new_name = new_name.replace("scales", "act.scales") + if new_name is not None: + new_name = new_name.replace("scales", "act.scales") else: new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) if new_name is None: @@ -502,6 +624,83 @@ class MPTModel(Model): self.gguf_writer.add_tensor("output.weight", data) +class OrionModel(Model): + def set_vocab(self): + self._set_vocab_sentencepiece() + + def set_gguf_parameters(self): + block_count = self.hparams["num_hidden_layers"] + head_count = self.hparams["num_attention_heads"] + head_count_kv = self.hparams.get("num_key_value_heads", head_count) + hf_repo = self.hparams.get("_name_or_path", "") + + ctx_length = 0 + if "max_sequence_length" in self.hparams: + ctx_length = self.hparams["max_sequence_length"] + elif "max_position_embeddings" in self.hparams: + ctx_length = self.hparams["max_position_embeddings"] + elif "model_max_length" in self.hparams: + ctx_length = self.hparams["model_max_length"] + else: + print("gguf: can not find ctx length parameter.") + sys.exit() + + self.gguf_writer.add_file_type(self.ftype) + self.gguf_writer.add_name(self.dir_model.name) + self.gguf_writer.add_source_hf_repo(hf_repo) + self.gguf_writer.add_tensor_data_layout("Meta AI original pth") + self.gguf_writer.add_context_length(ctx_length) + self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) + self.gguf_writer.add_head_count(head_count) + self.gguf_writer.add_head_count_kv(head_count_kv) + self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"]) + + def write_tensors(self): + # Collect tensors from generator object + model_kv = dict(self.get_tensors()) + block_count = self.hparams["num_hidden_layers"] + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + + for name, data_torch in model_kv.items(): + # we don't need these + if name.endswith(".rotary_emb.inv_freq"): + continue + + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{name} -> {new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + self.gguf_writer.add_tensor(new_name, data) + + class BaichuanModel(Model): def set_vocab(self): self._set_vocab_sentencepiece() @@ -817,10 +1016,17 @@ class PersimmonModel(Model): hidden_size = self.hparams["hidden_size"] self.gguf_writer.add_name('persimmon-8b-chat') + self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) self.gguf_writer.add_embedding_length(hidden_size) self.gguf_writer.add_block_count(block_count) self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count(hidden_size // head_count) + + # NOTE: not sure about this change - why does the model not have a rope dimension count when it is smaller + # than the head size? + # ref: https://github.com/ggerganov/llama.cpp/pull/4889 + # self.gguf_writer.add_rope_dimension_count(hidden_size // head_count) + self.gguf_writer.add_rope_dimension_count(hidden_size // head_count // 2) + self.gguf_writer.add_head_count(head_count) self.gguf_writer.add_head_count_kv(head_count_kv) self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"]) @@ -852,6 +1058,13 @@ class PersimmonModel(Model): class StableLMModel(Model): + def set_vocab(self): + if (self.dir_model / "tokenizer.json").is_file(): + self._set_vocab_gpt2() + else: + # StableLM 2 1.6B uses a vocab in a similar format to Qwen's vocab + self._set_vocab_qwen() + def set_gguf_parameters(self): hparams = self.hparams block_count = hparams["num_hidden_layers"] @@ -872,6 +1085,83 @@ class MixtralModel(Model): self._set_vocab_sentencepiece() +class MiniCPMModel(Model): + def set_gguf_parameters(self): + block_count = self.hparams["num_hidden_layers"] + self.gguf_writer.add_name("MiniCPM") + self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) + self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) + self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) + self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) + self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) + self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) + self.gguf_writer.add_file_type(self.ftype) + + def set_vocab(self): + self._set_vocab_hf() + + def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: + if n_kv_head is not None and n_head != n_kv_head: + n_head //= n_kv_head + + return ( + weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) + .swapaxes(1, 2) + .reshape(weights.shape) + ) + + def write_tensors(self): + block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + n_head = self.hparams.get("num_attention_heads") + n_kv_head = self.hparams.get("num_key_value_heads") + for name, data_torch in self.get_tensors(): + # we don't need these + if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")): + continue + + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + # HF models permute some of the tensors, so we need to undo that + if name.endswith(("q_proj.weight")): + data_torch = self._reverse_hf_permute(data_torch, n_head, n_head) + if name.endswith(("k_proj.weight")): + data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + + self.gguf_writer.add_tensor(new_name, data) + + class QwenModel(Model): @staticmethod def token_bytes_to_string(b): @@ -880,7 +1170,7 @@ class QwenModel(Model): return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')]) @staticmethod - def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: Optional[int] = None) -> list[bytes]: + def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]: parts = [bytes([b]) for b in token] while True: min_idx = None @@ -897,52 +1187,7 @@ class QwenModel(Model): return parts def set_vocab(self): - dir_model = self.dir_model - hparams = self.hparams - tokens: list[bytearray] = [] - toktypes: list[int] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) - vocab_size = hparams["vocab_size"] - assert max(tokenizer.get_vocab().values()) < vocab_size - - merges = [] - vocab = {} - mergeable_ranks = tokenizer.mergeable_ranks - for token, rank in mergeable_ranks.items(): - vocab[self.token_bytes_to_string(token)] = rank - if len(token) == 1: - continue - merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank) - assert len(merged) == 2 - merges.append(' '.join(map(self.token_bytes_to_string, merged))) - - reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in vocab.items()} - added_vocab = tokenizer.special_tokens - - for i in range(vocab_size): - if i not in reverse_vocab: - pad_token = f"[PAD{i}]".encode("utf-8") - tokens.append(bytearray(pad_token)) - toktypes.append(gguf.TokenType.USER_DEFINED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.CONTROL) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - - self.gguf_writer.add_tokenizer_model("gpt2") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(dir_model, load_merges=False) - special_vocab.merges = merges - special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"]) - special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"]) - special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"]) - special_vocab.add_to_gguf(self.gguf_writer) + self._set_vocab_qwen() def set_gguf_parameters(self): self.gguf_writer.add_name("Qwen") @@ -1014,7 +1259,7 @@ class GPT2Model(Model): for name, data_torch in self.get_tensors(): # we don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias")): + if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq", ".attn.bias", ".attn.masked_bias")): continue if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")): @@ -1061,17 +1306,22 @@ class GPT2Model(Model): class Phi2Model(Model): def set_gguf_parameters(self): - block_count = self.hparams["n_layer"] + block_count = self.find_hparam(["num_hidden_layers", "n_layer"]) + + rot_pct = self.find_hparam(["partial_rotary_factor"]) + n_embd = self.find_hparam(["hidden_size", "n_embd"]) + n_head = self.find_hparam(["num_attention_heads", "n_head"]) self.gguf_writer.add_name("Phi2") - self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) + self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"])) + + self.gguf_writer.add_embedding_length(n_embd) + self.gguf_writer.add_feed_forward_length(4 * n_embd) self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_head_count_kv(self.hparams["n_head"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["rotary_dim"]) + self.gguf_writer.add_head_count(n_head) + self.gguf_writer.add_head_count_kv(n_head) + self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"])) + self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head) self.gguf_writer.add_file_type(self.ftype) self.gguf_writer.add_add_bos_token(False) @@ -1155,6 +1405,386 @@ class PlamoModel(Model): self.gguf_writer.add_tensor(new_name, data) +class CodeShellModel(Model): + def set_gguf_parameters(self): + block_count = self.hparams["n_layer"] + + self.gguf_writer.add_name("CodeShell") + self.gguf_writer.add_context_length(self.hparams["n_positions"]) + self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) + self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_head_count(self.hparams["n_head"]) + self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"]) + self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) + self.gguf_writer.add_file_type(self.ftype) + self.gguf_writer.add_rope_freq_base(10000.0) + self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) + self.gguf_writer.add_rope_scaling_factor(1.0) + + def write_tensors(self): + block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer"))) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + tensors = dict(self.get_tensors()) + has_lm_head = "lm_head.weight" in tensors.keys() or "output.weight" in tensors.keys() + for name, data_torch in tensors.items(): + # we don't need these + if name.endswith((".attn.rotary_emb.inv_freq")): + continue + + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + + self.gguf_writer.add_tensor(new_name, data) + + if not has_lm_head and name == "transformer.wte.weight": + self.gguf_writer.add_tensor("output.weight", data) + print(name, f"=> output.weight, shape = {data.shape}, {old_dtype} --> {data.dtype}") + + +class InternLM2Model(Model): + def set_vocab(self): + # (TODO): Is there a better way? + # Copy from _set_vocab_sentencepiece, The only difference is that we will treat the character + # \x00 specially and convert it into an emoji character to prevent it from being mistakenly + # recognized as an empty string in C++. + from sentencepiece import SentencePieceProcessor + from sentencepiece import sentencepiece_model_pb2 as model + + tokenizer_path = self.dir_model / 'tokenizer.model' + + tokens: list[bytes] = [] + scores: list[float] = [] + toktypes: list[int] = [] + + if not tokenizer_path.is_file(): + print(f'Error: Missing {tokenizer_path}', file=sys.stderr) + sys.exit(1) + + sentencepiece_model = model.ModelProto() + sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) + add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix + + tokenizer = SentencePieceProcessor(str(tokenizer_path)) + vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) + + for token_id in range(vocab_size): + piece = tokenizer.id_to_piece(token_id) + text = piece.encode("utf-8") + score = tokenizer.get_score(token_id) + if text == b"\x00": + # (TODO): fixme + # Hack here and replace the \x00 characters. + print(f"InternLM2 convert token '{text}' to '🐉'!") + text = "🐉" + + toktype = SentencePieceTokenTypes.NORMAL + if tokenizer.is_unknown(token_id): + toktype = SentencePieceTokenTypes.UNKNOWN + elif tokenizer.is_control(token_id): + toktype = SentencePieceTokenTypes.CONTROL + elif tokenizer.is_unused(token_id): + toktype = SentencePieceTokenTypes.UNUSED + elif tokenizer.is_byte(token_id): + toktype = SentencePieceTokenTypes.BYTE + + tokens.append(text) + scores.append(score) + toktypes.append(toktype) + + added_tokens_file = self.dir_model / 'added_tokens.json' + if added_tokens_file.is_file(): + with open(added_tokens_file, "r", encoding="utf-8") as f: + added_tokens_json = json.load(f) + + for key in added_tokens_json: + tokens.append(key.encode("utf-8")) + scores.append(-1000.0) + toktypes.append(SentencePieceTokenTypes.USER_DEFINED) + + self.gguf_writer.add_tokenizer_model("llama") + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_scores(scores) + self.gguf_writer.add_token_types(toktypes) + self.gguf_writer.add_add_space_prefix(add_prefix) + + special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) + old_eos = special_vocab.special_token_ids["eos"] + if "chat" in os.path.basename(self.dir_model.absolute()): + # For the chat model, we replace the eos with '<|im_end|>'. + special_vocab.special_token_ids["eos"] = self._try_get_sft_eos(tokenizer) + print(f"Replace eos:{old_eos} with a special token:{special_vocab.special_token_ids['eos']} \ +in chat mode so that the conversation can end normally.") + + special_vocab.add_to_gguf(self.gguf_writer) + + def _try_get_sft_eos(self, tokenizer): + unused_145_list = tokenizer.encode('[UNUSED_TOKEN_145]') + im_end_list = tokenizer.encode('<|im_end|>') + assert (len(unused_145_list) == 1) ^ (len(im_end_list) == 1) + if len(unused_145_list) == 1: + eos_token = unused_145_list[0] + if len(im_end_list) == 1: + eos_token = im_end_list[0] + return eos_token + + def _hf_permute_qk(self, weights, n_head: int, n_head_kv: int): + if n_head_kv is not None and n_head != n_head_kv: + n_head = n_head_kv + return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) + .swapaxes(1, 2) + .reshape(weights.shape)) + + def set_gguf_parameters(self): + self.gguf_writer.add_name("InternLM2") + self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) + self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"]) + self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) + self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) + self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"]) + self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) + self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) + self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) + + def post_write_tensors(self, tensor_map, name, data_torch): + old_dtype = data_torch.dtype + + # convert any unsupported data types to float32 + if data_torch.dtype not in (torch.float16, torch.float32): + data_torch = data_torch.to(torch.float32) + + data = data_torch.squeeze().numpy() + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if self.ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}") + self.gguf_writer.add_tensor(new_name, data) + + def write_tensors(self): + from einops import rearrange + + num_heads = self.hparams.get("num_attention_heads") + num_kv_heads = self.hparams.get("num_key_value_heads") + hidden_size = self.hparams.get("hidden_size") + q_per_kv = num_heads // num_kv_heads + head_dim = hidden_size // num_heads + num_groups = num_heads // q_per_kv + + block_count = self.hparams["num_hidden_layers"] + model_kv = dict(self.get_tensors()) + tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count) + qkv_pattern = r"model\.layers\.(\d+)\.attention\.wqkv" + for name, data_torch in model_kv.items(): + # we don't need these + if name.endswith(".rotary_emb.inv_freq"): + continue + + if re.match(qkv_pattern, name): + bid = re.findall(qkv_pattern, name)[0] + qkv = data_torch + qkv = rearrange(qkv.T, " o (g n i) ->o g n i", g=num_groups, n=q_per_kv + 2, i=head_dim) + q, k, v = qkv[..., : q_per_kv, :], qkv[..., q_per_kv: q_per_kv + 1, :], qkv[..., q_per_kv + 1: q_per_kv + 2, :] + # The model weights of q and k equire additional reshape. + q = self._hf_permute_qk(rearrange(q, " o g n i -> o (g n i)").T, num_heads, num_heads) + k = self._hf_permute_qk(rearrange(k, " o g n i -> o (g n i)").T, num_heads, num_kv_heads) + v = rearrange(v, " o g n i -> o (g n i)").T + self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wq.weight", q) + self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wk.weight", k) + self.post_write_tensors(tensor_map, f"model.layers.{bid}.attention.wv.weight", v) + else: + self.post_write_tensors(tensor_map, name, data_torch) + + +class BertModel(Model): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.vocab_size = None + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_causal_attention(False) + + # get pooling path + with open(self.dir_model / "modules.json", encoding="utf-8") as f: + modules = json.load(f) + pooling_path = None + for mod in modules: + if mod["type"] == "sentence_transformers.models.Pooling": + pooling_path = mod["path"] + break + + # get pooling type + pooling_type = gguf.PoolingType.NONE + if pooling_path is not None: + with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f: + pooling = json.load(f) + if pooling["pooling_mode_mean_tokens"]: + pooling_type = gguf.PoolingType.MEAN + elif pooling["pooling_mode_cls_token"]: + pooling_type = gguf.PoolingType.CLS + else: + raise NotImplementedError("Only MEAN and CLS pooling types supported") + + self.gguf_writer.add_pooling_type(pooling_type.value) + + def set_vocab(self): + path = self.dir_model + added_tokens_path = self.dir_model if self.dir_model.exists() else None + + # use huggingface vocab to get all tokens + vocab = HfVocab(path, added_tokens_path) + tokens, scores, toktypes = zip(*vocab.all_tokens()) + assert len(tokens) == vocab.vocab_size + self.vocab_size = vocab.vocab_size + + # we need this to validate the size of the token_type embeddings + # though currently we are passing all zeros to the token_type embeddings + n_token_types = len(set(toktypes)) + self.gguf_writer.add_token_type_count(n_token_types) + + # convert to phantom space vocab + def phantom(tok, typ): + if tok.startswith(b"[") and tok.endswith(b"]"): + return tok + if tok.startswith(b"##"): + return tok[2:] + return b"\xe2\x96\x81" + tok + tokens = tuple(phantom(t, y) for t, y in zip(tokens, toktypes)) + + # set up bos and eos tokens (cls and sep) + self.gguf_writer.add_bos_token_id(vocab.tokenizer.cls_token_id) + self.gguf_writer.add_eos_token_id(vocab.tokenizer.sep_token_id) + + # add vocab to gguf + self.gguf_writer.add_tokenizer_model("bert") + self.gguf_writer.add_token_list(tokens) + self.gguf_writer.add_token_scores(scores) + self.gguf_writer.add_token_types(toktypes) + + # handle special tokens + special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) + special_vocab.add_to_gguf(self.gguf_writer) + + def write_tensors(self): + tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) + tensors = dict(self.get_tensors()) + for name, data_torch in tensors.items(): + # we are only using BERT for embeddings so we don't need the pooling layer + if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"): + continue # we don't need these + + # map tensor names + new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias")) + if new_name is None: + print(f"Can not map tensor {name!r}") + sys.exit() + + data = data_torch.squeeze().numpy() + n_dims = len(data.shape) + new_dtype: type[np.floating[Any]] + + if ( + self.ftype == 1 and name.endswith(".weight") and n_dims == 2 + and name != "embeddings.token_type_embeddings.weight" # not used with get_rows, must be F32 + ): + # if f16 desired, convert any float32 2-dim weight tensors to float16 + new_dtype = np.float16 + else: + # if f32 desired, convert any float16 to float32 + new_dtype = np.float32 + + print(f"{new_name}, n_dims = {n_dims}, {data_torch.dtype} --> {new_dtype}") + + if data.dtype != new_dtype: + data = data.astype(new_dtype) + + self.gguf_writer.add_tensor(new_name, data) + + +class NomicBertModel(BertModel): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # the HF config claims n_ctx=8192, but it uses RoPE scaling + self.hparams["n_ctx"] = 2048 + + # SwigLU activation + assert self.hparams["activation_function"] == "swiglu" + # this doesn't do anything in the HF version + assert self.hparams["causal"] is False + # no bias tensors + assert self.hparams["qkv_proj_bias"] is False + assert self.hparams["mlp_fc1_bias"] is False + assert self.hparams["mlp_fc2_bias"] is False + # norm at end of layer + assert self.hparams["prenorm"] is False + # standard RoPE + assert self.hparams["rotary_emb_fraction"] == 1.0 + assert self.hparams["rotary_emb_interleaved"] is False + assert self.hparams["rotary_emb_scale_base"] is None + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"]) + + def get_tensors(self): + assert self.vocab_size is not None + for name, data in super().get_tensors(): + # Nomic Embed's token embeddings tensor is padded, but llama.cpp wants tensor sizes to match exactly. + if name == 'embeddings.word_embeddings.weight' and data.shape[1] != self.vocab_size: + rounded_vocab_size = (self.vocab_size + 63) // 64 * 64 + assert data.shape == (rounded_vocab_size, self.hparams["n_embd"]) + data = data[:self.vocab_size, :] + yield name, data + + ###### CONVERSION LOGIC ###### @@ -1192,7 +1822,7 @@ def main() -> None: if args.awq_path: sys.path.insert(1, str(Path(__file__).parent / 'awq-py')) - from awq.apply_awq import add_scale_weights + from awq.apply_awq import add_scale_weights # type: ignore[import-not-found] tmp_model_path = args.model / "weighted_model" dir_model = tmp_model_path if tmp_model_path.is_dir(): diff --git a/convert-llama-ggml-to-gguf.py b/convert-llama-ggml-to-gguf.py index e359330af..b33108062 100755 --- a/convert-llama-ggml-to-gguf.py +++ b/convert-llama-ggml-to-gguf.py @@ -2,6 +2,7 @@ from __future__ import annotations import argparse +import os import struct import sys from enum import IntEnum @@ -9,7 +10,6 @@ from pathlib import Path import numpy as np -import os if 'NO_LOCAL_GGUF' not in os.environ: sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf @@ -371,15 +371,11 @@ def handle_metadata(cfg, hp): params = convert.Params.loadOriginalParamsJson(fakemodel, orig_config_path) else: raise ValueError('Unable to load metadata') - vocab = convert.load_vocab( - cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir, - cfg.vocabtype) - # FIXME: Respect cfg.vocab_dir? - svocab = gguf.SpecialVocab(cfg.model_metadata_dir, - load_merges = cfg.vocabtype == 'bpe', - n_vocab = vocab.vocab_size) + vocab_path = Path(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir) + vocab_factory = convert.VocabFactory(vocab_path) + vocab, special_vocab = vocab_factory.load_vocab(cfg.vocabtype, cfg.model_metadata_dir) convert.check_vocab_size(params, vocab) - return (params, vocab, svocab) + return params, vocab, special_vocab def handle_args(): diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py index 35ce152f4..9a9936dec 100755 --- a/convert-lora-to-ggml.py +++ b/convert-lora-to-ggml.py @@ -5,17 +5,16 @@ import json import os import struct import sys +from pathlib import Path from typing import Any, BinaryIO, Sequence import numpy as np import torch -from pathlib import Path if 'NO_LOCAL_GGUF' not in os.environ: sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf')) import gguf - NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1} @@ -60,7 +59,14 @@ if __name__ == '__main__': input_model = os.path.join(sys.argv[1], "adapter_model.bin") output_path = os.path.join(sys.argv[1], "ggml-adapter-model.bin") - model = torch.load(input_model, map_location="cpu") + if os.path.exists(input_model): + model = torch.load(input_model, map_location="cpu") + else: + input_model = os.path.join(sys.argv[1], "adapter_model.safetensors") + # lazy import load_file only if lora is in safetensors format. + from safetensors.torch import load_file + model = load_file(input_model, device="cpu") + arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama" if arch_name not in gguf.MODEL_ARCH_NAMES.values(): diff --git a/convert-persimmon-to-gguf.py b/convert-persimmon-to-gguf.py index 1ba5864dc..def210531 100755 --- a/convert-persimmon-to-gguf.py +++ b/convert-persimmon-to-gguf.py @@ -1,11 +1,13 @@ #!/usr/bin/env python3 -import torch -import os -from pprint import pprint -import sys import argparse +import os +import sys from pathlib import Path +from pprint import pprint + +import torch from sentencepiece import SentencePieceProcessor + if 'NO_LOCAL_GGUF' not in os.environ: sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) import gguf @@ -69,7 +71,7 @@ def main(): persimmon_model = torch.load(args.ckpt_path) hparams = persimmon_model['args'] pprint(hparams) - tensors = {} + tensors: dict[str, torch.Tensor] = {} _flatten_dict(persimmon_model['model'], tensors, None) arch = gguf.MODEL_ARCH.PERSIMMON @@ -86,7 +88,8 @@ def main(): gguf_writer.add_embedding_length(hidden_size) gguf_writer.add_block_count(block_count) gguf_writer.add_feed_forward_length(hparams.ffn_hidden_size) - gguf_writer.add_rope_dimension_count(hidden_size // head_count) + # ref: https://github.com/ggerganov/llama.cpp/pull/4889/commits/eea19039fc52ea2dbd1aab45b59ab4e3e29a3443 + gguf_writer.add_rope_dimension_count(hidden_size // head_count // 2) gguf_writer.add_head_count(head_count) gguf_writer.add_head_count_kv(head_count_kv) gguf_writer.add_rope_freq_base(hparams.rotary_emb_base) diff --git a/convert.py b/convert.py index 3b613eefc..63a0a5d78 100755 --- a/convert.py +++ b/convert.py @@ -17,58 +17,28 @@ import signal import struct import sys import time -import warnings import zipfile from abc import ABCMeta, abstractmethod -from argparse import ArgumentParser from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from dataclasses import dataclass from pathlib import Path -from typing import ( - IO, - TYPE_CHECKING, - Any, - Callable, - Iterable, - Literal, - Optional, - Tuple, - TypeVar, -) +from typing import IO, TYPE_CHECKING, Any, Callable, Iterable, Literal, TypeVar import numpy as np from sentencepiece import SentencePieceProcessor -try: - from transformers import AutoTokenizer -except ModuleNotFoundError as e: - warnings.warn(f"Could not import AutoTokenizer from transformers: {e}") +if 'NO_LOCAL_GGUF' not in os.environ: + sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) +import gguf -# If NO_LOCAL_GGUF is not set, try to import gguf from the local gguf-py directory -if "NO_LOCAL_GGUF" not in os.environ: - # Use absolute path to the gguf-py directory - gguf_py_dir = str(Path(__file__).resolve().parent / "gguf-py") - print(gguf_py_dir) # NOTE: Remove this once path is verified after changes are completed - if gguf_py_dir not in sys.path: - sys.path.insert(1, gguf_py_dir) +if TYPE_CHECKING: + from typing import TypeAlias -# Import gguf module -try: - import gguf -except ModuleNotFoundError as e: - print(f"Could not import gguf: {e}") - sys.exit(1) - -if TYPE_CHECKING: # NOTE: This isn't necessary. - from typing import TypeAlias # This can technically be omitted. - -if hasattr(faulthandler, "register") and hasattr(signal, "SIGUSR1"): +if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'): faulthandler.register(signal.SIGUSR1) -# NOTE: n-dimensional arrays should be directly referenced -NDArray: TypeAlias = "np.ndarray[Any, Any]" +NDArray: TypeAlias = 'np.ndarray[Any, Any]' -# Why is this here? LLAMA and GPT are technically the only compatible ARCHs. ARCH = gguf.MODEL_ARCH.LLAMA DEFAULT_CONCURRENCY = 8 @@ -78,7 +48,6 @@ DEFAULT_CONCURRENCY = 8 # -# TODO: Clean up and refactor data types @dataclass(frozen=True) class DataType: name: str @@ -183,85 +152,65 @@ GGML_FILE_TYPE_TO_DATA_TYPE: dict[GGMLFileType, DataType] = { @dataclass class Params: - n_vocab: int - n_embd: int - n_layer: int - n_ctx: int - n_ff: int - n_head: int - n_head_kv: int - f_norm_eps: Optional[float] = None - n_experts: Optional[int] = None - n_experts_used: Optional[int] = None + n_vocab: int + n_embd: int + n_layer: int + n_ctx: int + n_ff: int + n_head: int + n_head_kv: int + n_experts: int | None = None + n_experts_used: int | None = None + f_norm_eps: float | None = None - rope_scaling_type: Optional[gguf.RopeScalingType] = None - f_rope_freq_base: Optional[float] = None - f_rope_scale: Optional[float] = None - n_orig_ctx: Optional[int] = None - rope_finetuned: Optional[bool] = None + rope_scaling_type: gguf.RopeScalingType | None = None + f_rope_freq_base: float | None = None + f_rope_scale: float | None = None + n_orig_ctx: int | None = None + rope_finetuned: bool | None = None - ftype: Optional[GGMLFileType] = None + ftype: GGMLFileType | None = None # path to the directory containing the model files - path_model: Optional[Path] = None + path_model: Path | None = None @staticmethod - def guessed(model: LazyModel) -> "Params": + def guessed(model: LazyModel) -> Params: # try transformer naming first - n_vocab, n_embd = ( - model["model.embed_tokens.weight"].shape - if "model.embed_tokens.weight" in model - else model["tok_embeddings.weight"].shape - ) + n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape # try transformer naming first if "model.layers.0.self_attn.q_proj.weight" in model: - n_layer = next( - i - for i in itertools.count() - if f"model.layers.{i}.self_attn.q_proj.weight" not in model - ) - elif ( - "model.layers.0.self_attn.W_pack.weight" in model - ): # next: try baichuan naming - n_layer = next( - i - for i in itertools.count() - if f"model.layers.{i}.self_attn.W_pack.weight" not in model - ) + n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model) + elif "model.layers.0.self_attn.W_pack.weight" in model: # next: try baichuan naming + n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model) else: - n_layer = next( - i - for i in itertools.count() - if f"layers.{i}.attention.wq.weight" not in model - ) + n_layer = next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model) if n_layer < 1: - raise Exception( - "failed to guess 'n_layer'. This model is unknown or unsupported.\n" - "Suggestion: provide 'config.json' of the model in the same directory containing model files." - ) + raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n" + "Suggestion: provide 'config.json' of the model in the same directory containing model files.") - n_head = n_embd // 128 # guessed - n_mult = 256 # guessed + n_head = n_embd // 128 # guessed + n_mult = 256 # guessed # TODO: verify this n_ff = int(2 * (4 * n_embd) / 3) n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult) return Params( - n_vocab=n_vocab, - n_embd=n_embd, - n_layer=n_layer, - n_ctx=-1, - n_ff=n_ff, - n_head=n_head, - n_head_kv=n_head, - f_norm_eps=1e-5, + n_vocab = n_vocab, + n_embd = n_embd, + n_layer = n_layer, + n_ctx = -1, + n_ff = n_ff, + n_head = n_head, + n_head_kv = n_head, + f_norm_eps = 1e-5, ) @staticmethod - def load_transformers_config(model: LazyModel, config_path: Path) -> "Params": + def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params: config = json.load(open(config_path)) rope_scaling_type = f_rope_scale = n_orig_ctx = rope_finetuned = None @@ -274,22 +223,20 @@ class Params: rope_scaling_type = gguf.RopeScalingType.LINEAR elif typ == "yarn": rope_scaling_type = gguf.RopeScalingType.YARN - n_orig_ctx = rope_scaling["original_max_position_embeddings"] - rope_finetuned = rope_scaling["finetuned"] + n_orig_ctx = rope_scaling['original_max_position_embeddings'] + rope_finetuned = rope_scaling['finetuned'] else: - raise NotImplementedError(f"Unknown rope scaling type: {typ}") + raise NotImplementedError(f'Unknown rope scaling type: {typ}') if "max_sequence_length" in config: n_ctx = config["max_sequence_length"] elif "max_position_embeddings" in config: n_ctx = config["max_position_embeddings"] else: - raise Exception( - "failed to guess 'n_ctx'. This model is unknown or unsupported.\n" - "Suggestion: provide 'config.json' of the model in the same directory containing model files." - ) + raise Exception("failed to guess 'n_ctx'. This model is unknown or unsupported.\n" + "Suggestion: provide 'config.json' of the model in the same directory containing model files.") - n_experts = None + n_experts = None n_experts_used = None if "num_local_experts" in config: @@ -297,30 +244,30 @@ class Params: n_experts_used = config["num_experts_per_tok"] return Params( - n_vocab=config["vocab_size"], - n_embd=config["hidden_size"], - n_layer=config["num_hidden_layers"], - n_ctx=n_ctx, - n_ff=config["intermediate_size"], - n_head=(n_head := config["num_attention_heads"]), - n_head_kv=config.get("num_key_value_heads", n_head), - n_experts=n_experts, - n_experts_used=n_experts_used, - f_norm_eps=config["rms_norm_eps"], - f_rope_freq_base=config.get("rope_theta"), - rope_scaling_type=rope_scaling_type, - f_rope_scale=f_rope_scale, - n_orig_ctx=n_orig_ctx, - rope_finetuned=rope_finetuned, + n_vocab = config["vocab_size"], + n_embd = config["hidden_size"], + n_layer = config["num_hidden_layers"], + n_ctx = n_ctx, + n_ff = config["intermediate_size"], + n_head = (n_head := config["num_attention_heads"]), + n_head_kv = config.get("num_key_value_heads", n_head), + n_experts = n_experts, + n_experts_used = n_experts_used, + f_norm_eps = config["rms_norm_eps"], + f_rope_freq_base = config.get("rope_theta"), + rope_scaling_type = rope_scaling_type, + f_rope_scale = f_rope_scale, + n_orig_ctx = n_orig_ctx, + rope_finetuned = rope_finetuned, ) # LLaMA v2 70B params.json # {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1} @staticmethod - def load_torch_params(model: LazyModel, config_path: Path) -> "Params": + def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params: config = json.load(open(config_path)) - n_experts = None + n_experts = None n_experts_used = None f_rope_freq_base = None @@ -343,89 +290,90 @@ class Params: if config.get("moe"): n_ff = model["layers.0.feed_forward.experts.0.w1.weight"].shape[0] - n_experts = config["moe"]["num_experts"] + n_experts = config["moe"]["num_experts"] n_experts_used = config["moe"]["num_experts_per_tok"] f_rope_freq_base = 1e6 return Params( - n_vocab=config.get("vocab_size", model["tok_embeddings.weight"].shape[0]), - n_embd=config["dim"], - n_layer=config["n_layers"], - n_ctx=n_ctx, - n_ff=n_ff, - n_head=(n_head := config["n_heads"]), - n_head_kv=config.get("n_kv_heads", n_head), - n_experts=n_experts, - n_experts_used=n_experts_used, - f_norm_eps=config["norm_eps"], - f_rope_freq_base=config.get("rope_theta", f_rope_freq_base), + n_vocab = model["tok_embeddings.weight"].shape[0], + n_embd = config["dim"], + n_layer = config["n_layers"], + n_ctx = n_ctx, + n_ff = n_ff, + n_head = (n_head := config["n_heads"]), + n_head_kv = config.get("n_kv_heads", n_head), + n_experts = n_experts, + n_experts_used = n_experts_used, + f_norm_eps = config["norm_eps"], + f_rope_freq_base = config.get("rope_theta", f_rope_freq_base), ) @staticmethod - def load(model_plus: ModelPlus) -> "Params": - hf_config_path = model_plus.paths[0].parent / "config.json" + def load(model_plus: ModelPlus) -> Params: + hf_config_path = model_plus.paths[0].parent / "config.json" orig_config_path = model_plus.paths[0].parent / "params.json" if hf_config_path.exists(): - params = Params.load_transformers_config(model_plus.model, hf_config_path) + params = Params.loadHFTransformerJson(model_plus.model, hf_config_path) elif orig_config_path.exists(): - params = Params.load_torch_params(model_plus.model, orig_config_path) - elif model_plus.format != "none": + params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path) + elif model_plus.format != 'none': params = Params.guessed(model_plus.model) else: - raise ValueError("Cannot guess params when model format is none") + raise ValueError('Cannot guess params when model format is none') params.path_model = model_plus.paths[0].parent return params -class BpeVocab: # GPT - def __init__( - self, fname_tokenizer: Path, fname_added_tokens: Optional[Path] - ) -> None: - self.bpe_tokenizer = json.loads( - open(str(fname_tokenizer), encoding="utf-8").read() - ) +# +# vocab +# + +class BpeVocab: + def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None) -> None: + self.bpe_tokenizer = json.loads(open(str(fname_tokenizer), encoding="utf-8").read()) + if isinstance(self.bpe_tokenizer.get('model'), dict): + self.vocab = self.bpe_tokenizer["model"]["vocab"] + else: + self.vocab = self.bpe_tokenizer added_tokens: dict[str, int] if fname_added_tokens is not None: # FIXME: Verify that added tokens here _cannot_ overlap with the main vocab. added_tokens = json.load(open(fname_added_tokens, encoding="utf-8")) else: # Fall back to trying to find the added tokens in tokenizer.json - tokenizer_json_file = fname_tokenizer.parent / "tokenizer.json" + tokenizer_json_file = fname_tokenizer.parent / 'tokenizer.json' if not tokenizer_json_file.is_file(): added_tokens = {} else: tokenizer_json = json.load(open(tokenizer_json_file, encoding="utf-8")) added_tokens = dict( - (item["content"], item["id"]) - for item in tokenizer_json.get("added_tokens", []) + (item['content'], item['id']) + for item in tokenizer_json.get('added_tokens', []) # Added tokens here can be duplicates of the main vocabulary. - if item["content"] not in self.bpe_tokenizer - ) + if item['content'] not in self.bpe_tokenizer) - vocab_size: int = len(self.bpe_tokenizer) - expected_ids = list(range(vocab_size, vocab_size + len(added_tokens))) - actual_ids = sorted(added_tokens.values()) + vocab_size: int = len(self.vocab) + expected_ids = list(range(vocab_size, vocab_size + len(added_tokens))) + actual_ids = sorted(added_tokens.values()) if expected_ids != actual_ids: expected_end_id = vocab_size + len(actual_ids) - 1 - raise Exception( - f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range {vocab_size} - {expected_end_id}; got {actual_ids}" - ) + raise Exception(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range {vocab_size} - {expected_end_id}; got {actual_ids}") items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1]) - self.added_tokens_list = [text for (text, idx) in items] + self.added_tokens_dict = added_tokens + self.added_tokens_list = [text for (text, idx) in items] self.vocab_size_base: int = vocab_size - self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list) - self.fname_tokenizer = fname_tokenizer - self.fname_added_tokens = fname_added_tokens + self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list) + self.fname_tokenizer = fname_tokenizer + self.fname_added_tokens = fname_added_tokens def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: - tokenizer = self.bpe_tokenizer - reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.items()} + reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()} - for i, _ in enumerate(tokenizer): + for i, _ in enumerate(self.vocab): yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: @@ -441,10 +389,8 @@ class BpeVocab: # GPT return f"" -class SentencePieceVocab: # LlaMa - def __init__( - self, fname_tokenizer: Path, fname_added_tokens: Optional[Path] - ) -> None: +class SentencePieceVocab: + def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None) -> None: self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) added_tokens: dict[str, int] if fname_added_tokens is not None: @@ -454,22 +400,19 @@ class SentencePieceVocab: # LlaMa vocab_size: int = self.sentencepiece_tokenizer.vocab_size() - new_tokens = { - id: piece for piece, id in added_tokens.items() if id >= vocab_size - } + new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size} expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens))) - actual_new_ids = sorted(new_tokens.keys()) + actual_new_ids = sorted(new_tokens.keys()) if expected_new_ids != actual_new_ids: - raise ValueError( - f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}" - ) + raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}") # Token pieces that were added to the base vocabulary. - self.added_tokens_list = [new_tokens[id] for id in actual_new_ids] - self.vocab_size_base = vocab_size - self.vocab_size = self.vocab_size_base + len(self.added_tokens_list) - self.fname_tokenizer = fname_tokenizer + self.added_tokens_dict = added_tokens + self.added_tokens_list = [new_tokens[id] for id in actual_new_ids] + self.vocab_size_base = vocab_size + self.vocab_size = self.vocab_size_base + len(self.added_tokens_list) + self.fname_tokenizer = fname_tokenizer self.fname_added_tokens = fname_added_tokens def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: @@ -510,11 +453,15 @@ class SentencePieceVocab: # LlaMa class HfVocab: - def __init__( - self, - fname_tokenizer: Path, - fname_added_tokens: Optional[Path] = None, - ) -> None: + def __init__(self, fname_tokenizer: Path, fname_added_tokens: Path | None = None) -> None: + try: + from transformers import AutoTokenizer + except ImportError as e: + raise ImportError( + "To use HfVocab, please install the `transformers` package. " + "You can install it with `pip install transformers`." + ) from e + print("fname_tokenizer:", fname_tokenizer) # Allow the tokenizer to default to slow or fast versions. # Explicitly set tokenizer to use local paths. @@ -527,7 +474,7 @@ class HfVocab: # Initialize lists and dictionaries for added tokens self.added_tokens_list = [] self.added_tokens_dict = dict() - self.added_tokens_ids = set() + self.added_tokens_ids = set() # Process added tokens for tok, tokidx in sorted( @@ -548,12 +495,12 @@ class HfVocab: # Set vocabulary sizes self.vocab_size_base = self.tokenizer.vocab_size - self.vocab_size = self.vocab_size_base + len(self.added_tokens_list) + self.vocab_size = self.vocab_size_base + len(self.added_tokens_list) - self.fname_tokenizer = fname_tokenizer + self.fname_tokenizer = fname_tokenizer self.fname_added_tokens = fname_added_tokens - def hf_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]: + def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: reverse_vocab = { id: encoded_tok for encoded_tok, id in self.tokenizer.get_vocab().items() } @@ -568,14 +515,16 @@ class HfVocab: # Yield token text, score, and type yield token_text, self.get_token_score(token_id), self.get_token_type( - token_id, self.special_ids # Reuse already stored special IDs + token_id, token_text, self.special_ids # Reuse already stored special IDs ) - def get_token_type(self, token_id: int, special_ids: set) -> gguf.TokenType: + def get_token_type(self, token_id: int, token_text: bytes, special_ids: set[int]) -> gguf.TokenType: + # Special case for byte tokens + if re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text): + return gguf.TokenType.BYTE + # Determine token type based on whether it's a special token - return ( - gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL - ) + return gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL def get_token_score(self, token_id: int) -> float: # Placeholder for actual logic to determine the token's score @@ -585,9 +534,8 @@ class HfVocab: def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: for text in self.added_tokens_list: if text in self.specials: - toktype = self.get_token_type(self.specials[text], self.special_ids) + toktype = self.get_token_type(self.specials[text], b'', self.special_ids) score = self.get_token_score(self.specials[text]) - else: toktype = gguf.TokenType.USER_DEFINED score = -1000.0 @@ -781,7 +729,7 @@ def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus: else: model = merge_sharded([mp.model for mp in models_plus]) - return ModelPlus(model, paths, format, vocab) + return ModelPlus(model, paths, format, vocab) # pytype: disable=wrong-arg-types def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor: @@ -869,17 +817,13 @@ class LazyUnpickler(pickle.Unpickler): CLASSES: dict[tuple[str, str], Any] = { # getattr used here as a workaround for mypy not being smart enough to determine # the staticmethods have a __func__ attribute. - ("torch._tensor", "_rebuild_from_type_v2"): getattr( - rebuild_from_type_v2, "__func__" - ), - ("torch._utils", "_rebuild_tensor_v2"): getattr( - lazy_rebuild_tensor_v2, "__func__" - ), - ("torch", "BFloat16Storage"): LazyStorageKind(DT_BF16), - ("torch", "HalfStorage"): LazyStorageKind(DT_F16), - ("torch", "FloatStorage"): LazyStorageKind(DT_F32), - ("torch", "IntStorage"): LazyStorageKind(DT_I32), - ("torch", "Tensor"): LazyTensor, + ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'), + ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'), + ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16), + ('torch', 'HalfStorage'): LazyStorageKind(DT_F16), + ('torch', 'FloatStorage'): LazyStorageKind(DT_F32), + ('torch', 'IntStorage'): LazyStorageKind(DT_I32), + ('torch', 'Tensor'): LazyTensor, } def find_class(self, module: str, name: str) -> Any: @@ -966,7 +910,7 @@ def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], conc executor_class = ProcessPoolExecutor else: executor_class = ThreadPoolExecutor - with executor_class(max_workers = max_workers) as executor: + with executor_class(max_workers=max_workers) as executor: futures: list[concurrent.futures.Future[Out]] = [] done = False for _ in range(concurrency): @@ -1006,6 +950,7 @@ def check_vocab_size(params: Params, vocab: Vocab, pad_vocab: bool = False) -> N ) for i in range(1, pad_count + 1): vocab.added_tokens_dict[f""] = -1 + vocab.added_tokens_list.append(f"") vocab.vocab_size = params.n_vocab return @@ -1019,12 +964,8 @@ def check_vocab_size(params: Params, vocab: Vocab, pad_vocab: bool = False) -> N class OutputFile: - def __init__( - self, fname_out: Path, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE - ) -> None: - self.gguf = gguf.GGUFWriter( - fname_out, gguf.MODEL_ARCH_NAMES[ARCH], endianess=endianess - ) + def __init__(self, fname_out: Path, endianess:gguf.GGUFEndian = gguf.GGUFEndian.LITTLE) -> None: + self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH], endianess=endianess) def add_meta_arch(self, params: Params) -> None: name = "LLaMA" @@ -1033,21 +974,16 @@ class OutputFile: if params.n_ctx == 4096: name = "LLaMA v2" elif params.path_model is not None: - name = str(params.path_model.parent).split("/")[-1] + name = str(params.path_model.parent).split('/')[-1] - self.gguf.add_name(name) - self.gguf.add_context_length(params.n_ctx) - self.gguf.add_embedding_length(params.n_embd) - self.gguf.add_block_count(params.n_layer) - self.gguf.add_feed_forward_length(params.n_ff) + self.gguf.add_name (name) + self.gguf.add_context_length (params.n_ctx) + self.gguf.add_embedding_length (params.n_embd) + self.gguf.add_block_count (params.n_layer) + self.gguf.add_feed_forward_length (params.n_ff) self.gguf.add_rope_dimension_count(params.n_embd // params.n_head) - self.gguf.add_head_count(params.n_head) - self.gguf.add_head_count_kv(params.n_head_kv) - - if params.f_norm_eps is None: - raise ValueError("f_norm_eps is None") - - self.gguf.add_layer_norm_rms_eps(params.f_norm_eps) + self.gguf.add_head_count (params.n_head) + self.gguf.add_head_count_kv (params.n_head_kv) if params.n_experts: self.gguf.add_expert_count(params.n_experts) @@ -1055,6 +991,11 @@ class OutputFile: if params.n_experts_used: self.gguf.add_expert_used_count(params.n_experts_used) + if params.f_norm_eps: + self.gguf.add_layer_norm_rms_eps(params.f_norm_eps) + else: + raise ValueError('f_norm_eps is None') + if params.f_rope_freq_base is not None: self.gguf.add_rope_freq_base(params.f_rope_freq_base) @@ -1086,7 +1027,7 @@ class OutputFile: return tokenizer_model - def extract_vocabulary_from_model(self, vocab: Vocab) -> Tuple[list, list, list]: + def extract_vocabulary_from_model(self, vocab: Vocab) -> tuple[list[bytes], list[float], list[gguf.TokenType]]: tokens = [] scores = [] toktypes = [] @@ -1097,6 +1038,8 @@ class OutputFile: scores.append(score) toktypes.append(toktype) + assert len(tokens) == vocab.vocab_size + return tokens, scores, toktypes def add_meta_vocab(self, vocab: Vocab) -> None: @@ -1119,14 +1062,10 @@ class OutputFile: def add_tensor_info(self, name: str, tensor: LazyTensor) -> None: n_elements = int(np.prod(tensor.shape)) - raw_dtype = getattr(tensor.data_type, "ggml_type", None) - data_type = ( - getattr(tensor.data_type, "quantized_type", None) or tensor.data_type.dtype - ) + raw_dtype = getattr(tensor.data_type, 'ggml_type', None) + data_type = getattr(tensor.data_type, 'quantized_type', None) or tensor.data_type.dtype data_nbytes = tensor.data_type.elements_to_bytes(n_elements) - self.gguf.add_tensor_info( - name, tensor.shape, data_type, data_nbytes, raw_dtype=raw_dtype - ) + self.gguf.add_tensor_info(name, tensor.shape, data_type, data_nbytes, raw_dtype=raw_dtype) def write_meta(self) -> None: self.gguf.write_header_to_file() @@ -1140,14 +1079,10 @@ class OutputFile: @staticmethod def write_vocab_only( - fname_out: Path, - params: Params, - vocab: Vocab, - svocab: gguf.SpecialVocab, - endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, - pad_vocab: bool = False, + fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab, + endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, ) -> None: - check_vocab_size(params, vocab, pad_vocab=pad_vocab) + check_vocab_size(params, vocab, pad_vocab = pad_vocab) of = OutputFile(fname_out, endianess=endianess) @@ -1175,14 +1110,8 @@ class OutputFile: @staticmethod def write_all( - fname_out: Path, - ftype: GGMLFileType, - params: Params, - model: LazyModel, - vocab: Vocab, - svocab: gguf.SpecialVocab, - concurrency: int = DEFAULT_CONCURRENCY, - endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, + fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: Vocab, svocab: gguf.SpecialVocab, + concurrency: int = DEFAULT_CONCURRENCY, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, ) -> None: check_vocab_size(params, vocab, pad_vocab=pad_vocab) @@ -1202,26 +1131,19 @@ class OutputFile: of.write_tensor_info() # tensor data - ndarrays_inner = bounded_parallel_map( - OutputFile.do_item, model.items(), concurrency=concurrency - ) + ndarrays_inner = bounded_parallel_map(OutputFile.do_item, model.items(), concurrency = concurrency) if ftype == GGMLFileType.MostlyQ8_0: ndarrays = bounded_parallel_map( - OutputFile.maybe_do_quantize, - ndarrays_inner, - concurrency=concurrency, - max_workers=concurrency, + OutputFile.maybe_do_quantize, ndarrays_inner, concurrency=concurrency, max_workers=concurrency, use_processpool_executor=True, ) else: ndarrays = map(OutputFile.maybe_do_quantize, ndarrays_inner) start = time.time() - for i, ((name, lazy_tensor), ndarray) in enumerate( - zip(model.items(), ndarrays) - ): + for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)): elapsed = time.time() - start - size = " x ".join(f"{dim:6d}" for dim in lazy_tensor.shape) + size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape) padi = len(str(len(model))) print( f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}" @@ -1251,7 +1173,7 @@ def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyM for (name, tensor) in model.items()} -def convert_model_names(model: LazyModel, params: Params) -> LazyModel: +def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) -> LazyModel: tmap = gguf.TensorNameMap(ARCH, params.n_layer) should_skip: set[gguf.MODEL_TENSOR] = set(gguf.MODEL_TENSOR_SKIP.get(ARCH, [])) @@ -1277,7 +1199,11 @@ def convert_model_names(model: LazyModel, params: Params) -> LazyModel: for name, lazy_tensor in model.items(): tensor_type, name_new = tmap.get_type_and_name(name, try_suffixes = (".weight", ".bias")) or (None, None) if name_new is None: - raise Exception(f"Unexpected tensor name: {name}") + if skip_unknown: + print(f"Unexpected tensor name: {name} - skipping") + continue + else: + raise Exception(f"Unexpected tensor name: {name}. Use --skip-unknown to ignore it (e.g. LLaVA)") if tensor_type in should_skip: print(f"skipping tensor {name_new}") @@ -1358,7 +1284,7 @@ def load_some_model(path: Path) -> ModelPlus: class VocabFactory: def __init__(self, path: Path): self.path = path - self.files = { + self.files: dict[str, Path | None] = { "tokenizer.model": None, "vocab.json": None, "tokenizer.json": None, @@ -1373,27 +1299,20 @@ class VocabFactory: self.files[file] = file_path elif parent_file_path.exists(): self.files[file] = parent_file_path + print(f"Found vocab files: {self.files}") - def _select_file(self, vocabtype: Optional[str]) -> Path: + def _select_file(self, vocabtype: str | None) -> Path: if vocabtype in ["spm", "bpe"]: - # For SentencePiece and BPE, return specific files as before - file_key = "tokenizer.model" if vocabtype == "spm" else "vocab.json" - if self.files[file_key]: - return self.files[file_key] - else: - raise FileNotFoundError(f"{vocabtype} {file_key} not found.") - elif vocabtype == "hfft": + for file_key in self.files.keys(): + if (file := self.files[file_key]) is not None: + return file + raise FileNotFoundError(f"{vocabtype} vocab not found.") + if vocabtype == "hfft": # For Hugging Face Fast Tokenizer, return the directory path instead of a specific file return self.path - else: - raise ValueError(f"Unsupported vocabulary type {vocabtype}") + raise ValueError(f"Unsupported vocabulary type {vocabtype}") - def _create_special_vocab( - self, - vocab: Vocab, - vocabtype: str, - model_parent_path: Path, - ) -> gguf.SpecialVocab: + def _create_special_vocab(self, vocab: Vocab, vocabtype: str, model_parent_path: Path) -> gguf.SpecialVocab: load_merges = vocabtype == "bpe" n_vocab = vocab.vocab_size if hasattr(vocab, "vocab_size") else None return gguf.SpecialVocab( @@ -1403,13 +1322,12 @@ class VocabFactory: n_vocab=n_vocab, ) - def load_vocab( - self, vocabtype: str, model_parent_path: Path - ) -> Tuple[Vocab, gguf.SpecialVocab]: + def load_vocab(self, vocabtype: str, model_parent_path: Path) -> tuple[Vocab, gguf.SpecialVocab]: path = self._select_file(vocabtype) print(f"Loading vocab file '{path}', type '{vocabtype}'") added_tokens_path = path.parent / "added_tokens.json" + vocab: Vocab if vocabtype == "bpe": vocab = BpeVocab( path, added_tokens_path if added_tokens_path.exists() else None @@ -1424,6 +1342,7 @@ class VocabFactory: ) else: raise ValueError(f"Unsupported vocabulary type {vocabtype}") + # FIXME: Respect --vocab-dir? special_vocab = self._create_special_vocab( vocab, vocabtype, @@ -1432,18 +1351,17 @@ class VocabFactory: return vocab, special_vocab -def default_output_file(model_paths: list[Path], file_type: GGMLFileType) -> Path: +def default_outfile(model_paths: list[Path], file_type: GGMLFileType) -> Path: namestr = { - GGMLFileType.AllF32: "f32", + GGMLFileType.AllF32: "f32", GGMLFileType.MostlyF16: "f16", - GGMLFileType.MostlyQ8_0: "q8_0", + GGMLFileType.MostlyQ8_0:"q8_0", }[file_type] ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf" if ret in model_paths: sys.stderr.write( f"Error: Default output path ({ret}) would overwrite the input. " - "Please explicitly specify a path using --outfile.\n" - ) + "Please explicitly specify a path using --outfile.\n") sys.exit(1) return ret @@ -1453,111 +1371,35 @@ def do_dump_model(model_plus: ModelPlus) -> None: print(f"model_plus.format = {model_plus.format!r}") print(f"model_plus.vocab = {model_plus.vocab!r}") for name, lazy_tensor in model_plus.model.items(): - print( - f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}" - ) + print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}") -def get_argument_parser() -> ArgumentParser: +def main(args_in: list[str] | None = None) -> None: output_choices = ["f32", "f16"] if np.uint32(1) == np.uint32(1).newbyteorder("<"): # We currently only support Q8_0 output on little endian systems. output_choices.append("q8_0") + vocab_types = ["spm", "bpe", "hfft"] + parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file") + parser.add_argument("--awq-path", type=Path, help="Path to scale awq cache file", default=None) + parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") + parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") + parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") + parser.add_argument("--outtype", choices=output_choices, help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)") + parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") + parser.add_argument("--vocab-type", choices=vocab_types, help="The vocabulary format used to define the tokenizer model (default: spm)", default="spm") + parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") + parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") + parser.add_argument("--ctx", type=int, help="model training context (default: based on input)") + parser.add_argument("--concurrency", type=int, help=f"concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", default=DEFAULT_CONCURRENCY) + parser.add_argument("--big-endian", action="store_true", help="model is executed on big endian machine") + parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides") + parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing") - parser = argparse.ArgumentParser( - description="Convert a LLaMa model to a GGML compatible file" - ) - - parser.add_argument( - "model", - type=Path, - help="Directory containing the model file or the model file itself (*.pth, *.pt, *.bin)", - ) - - parser.add_argument( - "--awq-path", - type=Path, - help="Path to the Activation-aware Weight Quantization cache file", - default=None, - ) - - parser.add_argument( - "--dump", - action="store_true", - help="Display the model content without converting it", - ) - - parser.add_argument( - "--dump-single", - action="store_true", - help="Display the content of a single model file without conversion", - ) - - parser.add_argument( - "--vocab-only", - action="store_true", - help="Extract and output only the vocabulary", - ) - - parser.add_argument( - "--outtype", - choices=output_choices, - help="Output format - note: q8_0 may be very slow (default: f16 or f32 based on input)", - ) - - parser.add_argument( - "--vocab-dir", - type=Path, - help="Directory containing the tokenizer.model, if separate from the model file", - ) - - parser.add_argument( - "--vocab-type", - choices=["spm", "bpe", "hfft"], # hfft: Hugging Face Fast Tokenizer - default="spm", - help="The vocabulary format used to define the tokenizer model (default: spm)", - ) - - parser.add_argument( - "--pad-vocab", - action="store_true", - help="Add padding tokens when the model's vocabulary size exceeds the tokenizer metadata", - ) - - parser.add_argument( - "--outfile", - type=Path, - help="Specify the path for the output file (default is based on input)", - ) - - parser.add_argument( - "--ctx", type=int, help="Model training context (default is based on input)" - ) - - parser.add_argument( - "--concurrency", - type=int, - help=f"Concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", - default=DEFAULT_CONCURRENCY, - ) - - parser.add_argument( - "--big-endian", - action="store_true", - help="Indicate that the model is executed on a big-endian machine", - ) - - return parser - - -def main(argv: Optional[list[str]] = None) -> None: - parser = get_argument_parser() - args = parser.parse_args(argv) - + args = parser.parse_args(args_in) if args.awq_path: - sys.path.insert(1, str(Path(__file__).resolve().parent / "awq-py")) - from awq.apply_awq import add_scale_weights - + sys.path.insert(1, str(Path(__file__).parent / 'awq-py')) + from awq.apply_awq import add_scale_weights # type: ignore[import-not-found] tmp_model_path = args.model / "weighted_model" if tmp_model_path.is_dir(): print(f"{tmp_model_path} exists as a weighted model.") @@ -1576,14 +1418,11 @@ def main(argv: Optional[list[str]] = None) -> None: if not args.vocab_only: model_plus = load_some_model(args.model) else: - model_plus = ModelPlus( - model={}, paths=[args.model / "dummy"], format="none", vocab=None - ) + model_plus = ModelPlus(model = {}, paths = [args.model / 'dummy'], format = 'none', vocab = None) if args.dump: do_dump_model(model_plus) return - endianess = gguf.GGUFEndian.LITTLE if args.big_endian: endianess = gguf.GGUFEndian.BIG @@ -1591,12 +1430,10 @@ def main(argv: Optional[list[str]] = None) -> None: params = Params.load(model_plus) if params.n_ctx == -1: if args.ctx is None: - raise Exception( - "The model doesn't have a context size, and you didn't specify one with --ctx\n" - "Please specify one with --ctx:\n" - " - LLaMA v1: --ctx 2048\n" - " - LLaMA v2: --ctx 4096\n" - ) + raise Exception("The model doesn't have a context size, and you didn't specify one with --ctx\n" + "Please specify one with --ctx:\n" + " - LLaMA v1: --ctx 2048\n" + " - LLaMA v2: --ctx 4096\n") params.n_ctx = args.ctx if args.outtype: @@ -1617,42 +1454,30 @@ def main(argv: Optional[list[str]] = None) -> None: if not args.outfile: raise ValueError("need --outfile if using --vocab-only") outfile = args.outfile - OutputFile.write_vocab_only( - outfile, - params, - vocab, - special_vocab, - endianess=endianess, - pad_vocab=args.pad_vocab, - ) + OutputFile.write_vocab_only(outfile, params, vocab, special_vocab, + endianess=endianess, pad_vocab=args.pad_vocab) print(f"Wrote {outfile}") return if model_plus.vocab is not None and args.vocab_dir is None: vocab = model_plus.vocab - model = model_plus.model - model = convert_model_names(model, params) - ftype = pick_output_type(model, args.outtype) - model = convert_to_output_type(model, ftype) - outfile = args.outfile or default_output_file(model_plus.paths, ftype) + print(f"Vocab info: {vocab}") + print(f"Special vocab info: {special_vocab}") + + model = model_plus.model + model = convert_model_names(model, params, args.skip_unknown) + ftype = pick_output_type(model, args.outtype) + model = convert_to_output_type(model, ftype) + outfile = args.outfile or default_outfile(model_plus.paths, ftype) params.ftype = ftype print(f"Writing {outfile}, format {ftype}") - OutputFile.write_all( - outfile, - ftype, - params, - model, - vocab, - special_vocab, - concurrency=args.concurrency, - endianess=endianess, - pad_vocab=args.pad_vocab, - ) + OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab, + concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab) print(f"Wrote {outfile}") -if __name__ == "__main__": - main(sys.argv[1:]) # Exclude the first element (script name) from sys.argv +if __name__ == '__main__': + main() diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 13db64e9f..7fb2e1e92 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -24,6 +24,9 @@ else() add_subdirectory(infill) add_subdirectory(llama-bench) add_subdirectory(llava) + if (LLAMA_SYCL) + add_subdirectory(sycl) + endif() add_subdirectory(main) add_subdirectory(tokenize) add_subdirectory(parallel) @@ -36,11 +39,9 @@ else() add_subdirectory(speculative) add_subdirectory(lookahead) add_subdirectory(lookup) + add_subdirectory(gguf) add_subdirectory(train-text-from-scratch) add_subdirectory(imatrix) - if (LLAMA_METAL) - add_subdirectory(metal) - endif() if (LLAMA_BUILD_SERVER) add_subdirectory(server) endif() diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp index 57596ed98..55dfd9784 100644 --- a/examples/batched-bench/batched-bench.cpp +++ b/examples/batched-bench/batched-bench.cpp @@ -82,13 +82,17 @@ int main(int argc, char ** argv) { // init LLM - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); // initialize the model llama_model_params model_params = llama_model_default_params(); + const std::vector t_split(llama_max_devices(), 0.0f); + model_params.n_gpu_layers = n_gpu_layers; + model_params.tensor_split = t_split.data(); llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift index 4d0005349..d75c503d5 100644 --- a/examples/batched.swift/Sources/main.swift +++ b/examples/batched.swift/Sources/main.swift @@ -17,7 +17,7 @@ let n_parallel: Int = arguments.count > 3 && Int(arguments[3]) != nil ? Int(argu let n_len: Int = 32 // init LLM -llama_backend_init(false) +llama_backend_init() defer { llama_backend_free() } diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp index b1775e0b0..eab636692 100644 --- a/examples/batched/batched.cpp +++ b/examples/batched/batched.cpp @@ -50,7 +50,8 @@ int main(int argc, char ** argv) { // init LLM - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); // initialize the model diff --git a/examples/beam-search/beam-search.cpp b/examples/beam-search/beam-search.cpp index 679b382e1..866c6d7a6 100644 --- a/examples/beam-search/beam-search.cpp +++ b/examples/beam-search/beam-search.cpp @@ -119,7 +119,8 @@ int main(int argc, char ** argv) // Init LLM : //--------------------------------- - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model; llama_context * ctx; diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp index 434e1d6bd..e89f3de2f 100644 --- a/examples/benchmark/benchmark-matmult.cpp +++ b/examples/benchmark/benchmark-matmult.cpp @@ -194,7 +194,7 @@ int main(int argc, char ** argv) { // Set up a the benchmark matrices // printf("Creating new tensor q11 & Running quantize\n"); struct ggml_tensor * q11 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey); - ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements, hist_cur.data()); + ggml_quantize_chunk(qtype, (const float *) m11->data, q11->data, 0, nelements/m11->ne[0], m11->ne[0], hist_cur.data(), nullptr); // Set up a the compute graph // printf("Creating new tensor q31\n"); @@ -207,7 +207,7 @@ int main(int argc, char ** argv) { // Set up a second graph computation to make sure we override the CPU cache lines // printf("Creating new tensor q12 & Running quantize\n"); struct ggml_tensor * q12 = ggml_new_tensor_2d(ctx, qtype, sizex, sizey); - ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements, hist_cur.data()); + ggml_quantize_chunk(qtype, (const float *) m12->data, q12->data, 0, nelements/m12->ne[0], m12->ne[0], hist_cur.data(), nullptr); // printf("Creating new tensor q32\n"); struct ggml_tensor * q32 = ggml_mul_mat(ctx, q12, m2); diff --git a/examples/cmap-example/kvcache2.cpp b/examples/cmap-example/kvcache2.cpp new file mode 100644 index 000000000..5b1b541c9 --- /dev/null +++ b/examples/cmap-example/kvcache2.cpp @@ -0,0 +1,76 @@ +/* +A utility to represent the kv-cache occupancy graphically +Takes as parameters +- total cache size (-c) +- number of simultaneous accesses/slots (-np) +- a parameter related to the display context (max window width - data display requirements) +It then uses a trick borrowed from tqdm to display occupancy +TODO: Show contiguous space and block availability +*/ +#include +#include +#include +#include // for rand() + +// my custom function to display graphics of the kvcache status +static void show_kvcache(std::vector> used_blocks, int cache_size) { + + int max_length = 128; + int num_blocks = used_blocks.size(); + int slot_cache_size = cache_size / num_blocks; + bool cls_flag = true; + std::string slot_symbol1 = ""; + std::string slot_symbol2 = ""; + std::string slot_symbol3 = ""; + auto& p = used_blocks[0]; + llama_client_slot slot = p.second; + + return; // remove when not in debug mode + + if ((used_blocks.size() == 0) || (used_blocks[0].first == 0)) { + return; + } + + // Print visualization + // Always start at the top left of the window (H means 'move cursor to this position'; 2J = cls) + // Only clear the screen the first time round + if (cls_flag) { + printf("\033[2J"); + cls_flag = false; + } + printf("\033[1;0H\033[K**************************\n\033[KKVcache occupancy by slot:\n\033[K**************************\n"); + for(int i=0; i split_lines(const std::string & s) { + std::string line; + std::vector lines; + std::stringstream ss(s); + while (std::getline(ss, line)) { + lines.push_back(line); + } + return lines; +} + +static void batch_add_seq(llama_batch & batch, const std::vector & tokens, int seq_id) { + for (size_t i = 0; i < tokens.size(); i++) { + llama_batch_add(batch, tokens[i], i, { seq_id }, false); + } +} + +static void normalize(float * vec, float * out, int n) { + float norm = 0; + for (int i = 0; i < n; i++) { + norm += vec[i] * vec[i]; + } + norm = sqrt(norm); + for (int i = 0; i < n; i++) { + out[i] = vec[i] / norm; + } +} + +static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) { + // clear previous kv_cache values (irrelevant for embeddings) + llama_kv_cache_clear(ctx); + + // run model + fprintf(stderr, "%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq); + if (llama_decode(ctx, batch) < 0) { + fprintf(stderr, "%s : failed to decode\n", __func__); + } + + // normalize on copy + for (int k = 0; k < n_seq; k++) { + float * emb = llama_get_embeddings_ith(ctx, k); + float * out = output + k * n_embd; + normalize(emb, out, n_embd); + } +} + int main(int argc, char ** argv) { gpt_params params; @@ -29,7 +74,8 @@ int main(int argc, char ** argv) { params.prompt = gpt_random_prompt(rng); } - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model; llama_context * ctx; @@ -55,49 +101,84 @@ int main(int argc, char ** argv) { fprintf(stderr, "%s\n", get_system_info(params).c_str()); } - int n_past = 0; + // split the prompt into lines + std::vector prompts = split_lines(params.prompt); - // tokenize the prompt - auto embd_inp = ::llama_tokenize(ctx, params.prompt, true); + // max batch size + const uint64_t n_batch = params.n_batch; + GGML_ASSERT(params.n_batch == params.n_ctx); + // tokenize the prompts and trim + std::vector> inputs; + for (const auto & prompt : prompts) { + auto inp = ::llama_tokenize(ctx, prompt, true); + if (inp.size() > n_batch) { + inp.resize(n_batch); + } + inputs.push_back(inp); + } + + // tokenization stats if (params.verbose_prompt) { - fprintf(stderr, "\n"); - fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str()); - fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); - for (int i = 0; i < (int) embd_inp.size(); i++) { - fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str()); + for (int i = 0; i < (int) inputs.size(); i++) { + fprintf(stderr, "%s: prompt %d: '%s'\n", __func__, i, prompts[i].c_str()); + fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, inputs[i].size()); + for (int j = 0; j < (int) inputs[i].size(); j++) { + fprintf(stderr, "%6d -> '%s'\n", inputs[i][j], llama_token_to_piece(ctx, inputs[i][j]).c_str()); + } + fprintf(stderr, "\n\n"); } - fprintf(stderr, "\n"); } - if (embd_inp.size() > (size_t)n_ctx) { - fprintf(stderr, "%s: error: prompt is longer than the context window (%zu tokens, n_ctx = %d)\n", - __func__, embd_inp.size(), n_ctx); - return 1; - } - - while (!embd_inp.empty()) { - int n_tokens = std::min(params.n_batch, (int) embd_inp.size()); - if (llama_decode(ctx, llama_batch_get_one(embd_inp.data(), n_tokens, n_past, 0))) { - fprintf(stderr, "%s : failed to eval\n", __func__); - return 1; - } - n_past += n_tokens; - embd_inp.erase(embd_inp.begin(), embd_inp.begin() + n_tokens); - } + // initialize batch + const int n_prompts = prompts.size(); + struct llama_batch batch = llama_batch_init(n_batch, 0, n_prompts); + // allocate output const int n_embd = llama_n_embd(model); - const auto * embeddings = llama_get_embeddings(ctx); + std::vector embeddings(n_prompts * n_embd, 0); + float * emb = embeddings.data(); - for (int i = 0; i < n_embd; i++) { - printf("%f ", embeddings[i]); + // break into batches + int p = 0; // number of prompts processed already + int s = 0; // number of prompts in current batch + for (int k = 0; k < n_prompts; k++) { + // clamp to n_batch tokens + auto & inp = inputs[k]; + const uint64_t n_toks = inp.size(); + + // encode if at capacity + if (batch.n_tokens + n_toks > n_batch) { + float * out = emb + p * n_embd; + batch_decode(ctx, batch, out, s, n_embd); + llama_batch_clear(batch); + p += s; + s = 0; + } + + // add to batch + batch_add_seq(batch, inp, s); + s += 1; } - printf("\n"); + // final batch + float * out = emb + p * n_embd; + batch_decode(ctx, batch, out, s, n_embd); + + // print first 3 embeddings + for (int j = 0; j < std::min(3, n_prompts); j++) { + fprintf(stderr, "embedding %d: ", j); + for (int i = 0; i < n_embd; i++) { + fprintf(stderr, "%f ", emb[j * n_embd + i]); + } + fprintf(stderr, "\n\n"); + } + fprintf(stderr, "\n"); + + // clean up llama_print_timings(ctx); llama_free(ctx); llama_free_model(model); - llama_backend_free(); return 0; diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp index 58fbe204d..2f7be8a13 100644 --- a/examples/export-lora/export-lora.cpp +++ b/examples/export-lora/export-lora.cpp @@ -245,9 +245,8 @@ static struct lora_data * load_lora(struct lora_info * info) { params_ggml.no_alloc = true; result->ctx = ggml_init(params_ggml); - uint32_t LLAMA_FILE_MAGIC_LORA = 0x67676C61; // 'ggla' uint32_t magic = file.read_u32(); - if (magic != LLAMA_FILE_MAGIC_LORA) { + if (magic != LLAMA_FILE_MAGIC_GGLA) { die_fmt("unexpected lora header file magic in '%s'", info->filename.c_str()); } uint32_t version = file.read_u32(); @@ -338,24 +337,14 @@ static bool apply_lora(struct ggml_tensor * tensor, struct lora_data * lora, int params.mem_buffer = NULL; params.no_alloc = true; struct ggml_context * ctx = NULL; - struct ggml_allocr * alloc = NULL; - struct ggml_cgraph * gf = NULL; + struct ggml_gallocr * alloc = NULL; + struct ggml_cgraph * gf = NULL; ctx = ggml_init(params); - alloc = ggml_allocr_new_measure(tensor_alignment); + alloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type()); gf = build_graph_lora(ctx, tensor, lora_a, lora_b, scaling); - size_t alloc_size = ggml_allocr_alloc_graph(alloc, gf); - ggml_allocr_free(alloc); - ggml_free(ctx); - static std::vector data_compute; - data_compute.resize(alloc_size + tensor_alignment); - - ctx = ggml_init(params); - alloc = ggml_allocr_new(data_compute.data(), data_compute.size(), tensor_alignment); - gf = build_graph_lora(ctx, tensor, lora_a, lora_b, scaling); - ggml_allocr_alloc_graph(alloc, gf); - ggml_allocr_free(alloc); + ggml_gallocr_alloc_graph(alloc, gf); struct ggml_cplan cplan = ggml_graph_plan(gf, n_threads); static std::vector data_work; @@ -364,6 +353,7 @@ static bool apply_lora(struct ggml_tensor * tensor, struct lora_data * lora, int ggml_graph_compute(gf, &cplan); + ggml_gallocr_free(alloc); ggml_free(ctx); return true; } diff --git a/examples/finetune/README.md b/examples/finetune/README.md index a884706c5..2fafd505e 100644 --- a/examples/finetune/README.md +++ b/examples/finetune/README.md @@ -80,9 +80,9 @@ The LORA rank can be configured for each model tensor type separately with these --rank-wk N LORA rank for wk tensor (default 4) --rank-wv N LORA rank for wv tensor (default 4) --rank-wo N LORA rank for wo tensor (default 4) - --rank-w1 N LORA rank for w1 tensor (default 4) - --rank-w2 N LORA rank for w2 tensor (default 4) - --rank-w3 N LORA rank for w3 tensor (default 4) + --rank-ffn_gate N LORA rank for ffn_gate tensor (default 4) + --rank-ffn_down N LORA rank for ffn_down tensor (default 4) + --rank-ffn_up N LORA rank for ffn_up tensor (default 4) ``` The LORA rank of 'norm' tensors should always be 1. diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index eaca42fc1..98bf5a07a 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -1,5 +1,6 @@ #include "ggml.h" #include "ggml-alloc.h" +#include "ggml-backend.h" #include "llama.h" #include "common.h" #include "train.h" @@ -13,8 +14,6 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif -static const size_t tensor_alignment = 32; - struct my_llama_hparams { uint32_t n_vocab = 32000; uint32_t n_ctx = 512; @@ -61,9 +60,9 @@ struct my_llama_layer { struct ggml_tensor * ffn_norm; // ff - struct ggml_tensor * w1; - struct ggml_tensor * w2; - struct ggml_tensor * w3; + struct ggml_tensor * ffn_gate; // w1 + struct ggml_tensor * ffn_down; // w2 + struct ggml_tensor * ffn_up; // w3 }; struct my_llama_model { @@ -86,9 +85,9 @@ struct my_llama_lora_hparams { uint32_t n_rank_wv = 4; uint32_t n_rank_wo = 4; uint32_t n_rank_ffn_norm = 1; - uint32_t n_rank_w1 = 4; - uint32_t n_rank_w2 = 4; - uint32_t n_rank_w3 = 4; + uint32_t n_rank_ffn_gate = 4; + uint32_t n_rank_ffn_down = 4; + uint32_t n_rank_ffn_up = 4; uint32_t n_rank_tok_embeddings = 4; uint32_t n_rank_norm = 1; uint32_t n_rank_output = 4; @@ -118,17 +117,17 @@ struct my_llama_lora_layer { struct ggml_tensor * ffn_norm_b; // ff - struct ggml_tensor * w1_a; - struct ggml_tensor * w1_b; - struct ggml_tensor * w2_a; - struct ggml_tensor * w2_b; - struct ggml_tensor * w3_a; - struct ggml_tensor * w3_b; + struct ggml_tensor * ffn_gate_a; + struct ggml_tensor * ffn_gate_b; + struct ggml_tensor * ffn_down_a; + struct ggml_tensor * ffn_down_b; + struct ggml_tensor * ffn_up_a; + struct ggml_tensor * ffn_up_b; }; struct my_llama_lora { struct ggml_context * ctx = NULL; - std::vector data; + ggml_backend_buffer_t data; my_llama_lora_hparams hparams; @@ -209,9 +208,9 @@ static void print_lora_params(struct my_llama_lora_hparams * params) { printf("%s: n_rank_wv : %u\n", __func__, params->n_rank_wv); printf("%s: n_rank_wo : %u\n", __func__, params->n_rank_wo); printf("%s: n_rank_ffn_norm : %u\n", __func__, params->n_rank_ffn_norm); - printf("%s: n_rank_w1 : %u\n", __func__, params->n_rank_w1); - printf("%s: n_rank_w2 : %u\n", __func__, params->n_rank_w2); - printf("%s: n_rank_w3 : %u\n", __func__, params->n_rank_w3); + printf("%s: n_rank_ffn_gate : %u\n", __func__, params->n_rank_ffn_gate); + printf("%s: n_rank_ffn_down : %u\n", __func__, params->n_rank_ffn_down); + printf("%s: n_rank_ffn_up : %u\n", __func__, params->n_rank_ffn_up); printf("%s: n_rank_tok_embeddings : %u\n", __func__, params->n_rank_tok_embeddings); printf("%s: n_rank_norm : %u\n", __func__, params->n_rank_norm); printf("%s: n_rank_output : %u\n", __func__, params->n_rank_output); @@ -320,9 +319,9 @@ static void init_model(struct llama_model * input, struct my_llama_model * model layer.wv = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_V, i)); layer.wo = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_OUT, i)); layer.ffn_norm = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_NORM, i)); - layer.w1 = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_GATE, i)); - layer.w2 = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_DOWN, i)); - layer.w3 = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_UP, i)); + layer.ffn_gate = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_GATE, i)); + layer.ffn_down = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_DOWN, i)); + layer.ffn_up = llama_get_model_tensor(input, tni(LLM_TENSOR_FFN_UP, i)); assert_shape_1d(layer.attention_norm, hparams.n_embd); assert_shape_2d(layer.wq, hparams.n_embd, hparams.n_embd); @@ -330,9 +329,9 @@ static void init_model(struct llama_model * input, struct my_llama_model * model assert_shape_2d(layer.wv, hparams.n_embd, hparams.n_embd_gqa()); assert_shape_2d(layer.wo, hparams.n_embd, hparams.n_embd); assert_shape_1d(layer.ffn_norm, hparams.n_embd); - assert_shape_2d(layer.w1, hparams.n_embd, hparams.n_ff); - assert_shape_2d(layer.w2, hparams.n_ff, hparams.n_embd); - assert_shape_2d(layer.w3, hparams.n_embd, hparams.n_ff); + assert_shape_2d(layer.ffn_gate, hparams.n_embd, hparams.n_ff); + assert_shape_2d(layer.ffn_down, hparams.n_ff, hparams.n_embd); + assert_shape_2d(layer.ffn_up, hparams.n_embd, hparams.n_ff); } } @@ -363,69 +362,12 @@ static void set_param_lora(struct my_llama_lora * lora) { ggml_set_param(ctx, layer.wo_b); ggml_set_param(ctx, layer.ffn_norm_a); ggml_set_param(ctx, layer.ffn_norm_b); - ggml_set_param(ctx, layer.w1_a); - ggml_set_param(ctx, layer.w1_b); - ggml_set_param(ctx, layer.w2_a); - ggml_set_param(ctx, layer.w2_b); - ggml_set_param(ctx, layer.w3_a); - ggml_set_param(ctx, layer.w3_b); - } -} - -static void alloc_lora(struct ggml_allocr * alloc, struct my_llama_lora * lora) { - ggml_allocr_alloc(alloc, lora->tok_embeddings_a); - ggml_allocr_alloc(alloc, lora->tok_embeddings_b); - ggml_allocr_alloc(alloc, lora->norm_a); - ggml_allocr_alloc(alloc, lora->norm_b); - ggml_allocr_alloc(alloc, lora->output_a); - ggml_allocr_alloc(alloc, lora->output_b); - for (uint32_t i = 0; i < lora->layers.size(); ++i) { - auto & layer = lora->layers[i]; - ggml_allocr_alloc(alloc, layer.attention_norm_a); - ggml_allocr_alloc(alloc, layer.attention_norm_b); - ggml_allocr_alloc(alloc, layer.wq_a); - ggml_allocr_alloc(alloc, layer.wq_b); - ggml_allocr_alloc(alloc, layer.wk_a); - ggml_allocr_alloc(alloc, layer.wk_b); - ggml_allocr_alloc(alloc, layer.wv_a); - ggml_allocr_alloc(alloc, layer.wv_b); - ggml_allocr_alloc(alloc, layer.wo_a); - ggml_allocr_alloc(alloc, layer.wo_b); - ggml_allocr_alloc(alloc, layer.ffn_norm_a); - ggml_allocr_alloc(alloc, layer.ffn_norm_b); - ggml_allocr_alloc(alloc, layer.w1_a); - ggml_allocr_alloc(alloc, layer.w1_b); - ggml_allocr_alloc(alloc, layer.w2_a); - ggml_allocr_alloc(alloc, layer.w2_b); - ggml_allocr_alloc(alloc, layer.w3_a); - ggml_allocr_alloc(alloc, layer.w3_b); - } - ggml_allocr_alloc(alloc, lora->tok_embeddings_a->grad); - ggml_allocr_alloc(alloc, lora->tok_embeddings_b->grad); - ggml_allocr_alloc(alloc, lora->norm_a->grad); - ggml_allocr_alloc(alloc, lora->norm_b->grad); - ggml_allocr_alloc(alloc, lora->output_a->grad); - ggml_allocr_alloc(alloc, lora->output_b->grad); - for (uint32_t i = 0; i < lora->layers.size(); ++i) { - auto & layer = lora->layers[i]; - ggml_allocr_alloc(alloc, layer.attention_norm_a->grad); - ggml_allocr_alloc(alloc, layer.attention_norm_b->grad); - ggml_allocr_alloc(alloc, layer.wq_a->grad); - ggml_allocr_alloc(alloc, layer.wq_b->grad); - ggml_allocr_alloc(alloc, layer.wk_a->grad); - ggml_allocr_alloc(alloc, layer.wk_b->grad); - ggml_allocr_alloc(alloc, layer.wv_a->grad); - ggml_allocr_alloc(alloc, layer.wv_b->grad); - ggml_allocr_alloc(alloc, layer.wo_a->grad); - ggml_allocr_alloc(alloc, layer.wo_b->grad); - ggml_allocr_alloc(alloc, layer.ffn_norm_a->grad); - ggml_allocr_alloc(alloc, layer.ffn_norm_b->grad); - ggml_allocr_alloc(alloc, layer.w1_a->grad); - ggml_allocr_alloc(alloc, layer.w1_b->grad); - ggml_allocr_alloc(alloc, layer.w2_a->grad); - ggml_allocr_alloc(alloc, layer.w2_b->grad); - ggml_allocr_alloc(alloc, layer.w3_a->grad); - ggml_allocr_alloc(alloc, layer.w3_b->grad); + ggml_set_param(ctx, layer.ffn_gate_a); + ggml_set_param(ctx, layer.ffn_gate_b); + ggml_set_param(ctx, layer.ffn_down_a); + ggml_set_param(ctx, layer.ffn_down_b); + ggml_set_param(ctx, layer.ffn_up_a); + ggml_set_param(ctx, layer.ffn_up_b); } } @@ -493,12 +435,12 @@ static void init_lora(const struct my_llama_model * model, struct my_llama_lora layer.ffn_norm_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_norm, n_embd); layer.ffn_norm_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_norm, 1); - layer.w1_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w1, n_embd); - layer.w1_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w1, n_ff); - layer.w2_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w2, n_ff); - layer.w2_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w2, n_embd); - layer.w3_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w3, n_embd); - layer.w3_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_w3, n_ff); + layer.ffn_gate_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_gate, n_embd); + layer.ffn_gate_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_gate, n_ff); + layer.ffn_down_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_down, n_ff); + layer.ffn_down_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_down, n_embd); + layer.ffn_up_a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_up, n_embd); + layer.ffn_up_b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, lparams.n_rank_ffn_up, n_ff); ggml_set_name(layer.attention_norm_a, tni(LLM_TENSOR_ATTN_NORM, ".weight.lora_a", i)); ggml_set_name(layer.attention_norm_b, tni(LLM_TENSOR_ATTN_NORM, ".weight.lora_b", i)); @@ -512,28 +454,18 @@ static void init_lora(const struct my_llama_model * model, struct my_llama_lora ggml_set_name(layer.wo_b, tni(LLM_TENSOR_ATTN_OUT, ".weight.lora_b", i)); ggml_set_name(layer.ffn_norm_a, tni(LLM_TENSOR_FFN_NORM, ".weight.lora_a", i)); ggml_set_name(layer.ffn_norm_b, tni(LLM_TENSOR_FFN_NORM, ".weight.lora_b", i)); - ggml_set_name(layer.w1_a, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_a", i)); - ggml_set_name(layer.w1_b, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_b", i)); - ggml_set_name(layer.w2_a, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_a", i)); - ggml_set_name(layer.w2_b, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_b", i)); - ggml_set_name(layer.w3_a, tni(LLM_TENSOR_FFN_UP, ".weight.lora_a", i)); - ggml_set_name(layer.w3_b, tni(LLM_TENSOR_FFN_UP, ".weight.lora_b", i)); + ggml_set_name(layer.ffn_gate_a, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_a", i)); + ggml_set_name(layer.ffn_gate_b, tni(LLM_TENSOR_FFN_GATE, ".weight.lora_b", i)); + ggml_set_name(layer.ffn_down_a, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_a", i)); + ggml_set_name(layer.ffn_down_b, tni(LLM_TENSOR_FFN_DOWN, ".weight.lora_b", i)); + ggml_set_name(layer.ffn_up_a, tni(LLM_TENSOR_FFN_UP, ".weight.lora_a", i)); + ggml_set_name(layer.ffn_up_b, tni(LLM_TENSOR_FFN_UP, ".weight.lora_b", i)); } set_param_lora(lora); - // measure data size - size_t size = 0; - for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { - size += GGML_PAD(ggml_nbytes(t), tensor_alignment); - } - - // allocate data - struct ggml_allocr * alloc = NULL; - lora->data.resize(size + tensor_alignment); - alloc = ggml_allocr_new(lora->data.data(), lora->data.size(), tensor_alignment); - alloc_lora(alloc, lora); - ggml_allocr_free(alloc); + // allocate data for lora tensors + lora->data = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cpu_buffer_type()); } static void randomize_lora(struct my_llama_lora * lora, int seed, float mean, float std, float min, float max) { @@ -565,12 +497,12 @@ static void randomize_lora(struct my_llama_lora * lora, int seed, float mean, fl randomize_tensor_normal(layer.ffn_norm_a, rnd); ggml_set_zero(layer.ffn_norm_b); - randomize_tensor_normal(layer.w1_a, rnd); - ggml_set_zero(layer.w1_b); - randomize_tensor_normal(layer.w2_a, rnd); - ggml_set_zero(layer.w2_b); - randomize_tensor_normal(layer.w3_a, rnd); - ggml_set_zero(layer.w3_b); + randomize_tensor_normal(layer.ffn_gate_a, rnd); + ggml_set_zero(layer.ffn_gate_b); + randomize_tensor_normal(layer.ffn_down_a, rnd); + ggml_set_zero(layer.ffn_down_b); + randomize_tensor_normal(layer.ffn_up_a, rnd); + ggml_set_zero(layer.ffn_up_b); } free_random_normal_distribution(rnd); @@ -579,7 +511,7 @@ static void randomize_lora(struct my_llama_lora * lora, int seed, float mean, fl static struct ggml_tensor * llama_build_lora_finetune_graphs( struct my_llama_model * model, struct my_llama_lora * lora, - struct ggml_allocr * alloc, + ggml_gallocr_t alloc, struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, @@ -590,7 +522,8 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( const int n_tokens, const int n_batch, const bool enable_flash_attn, - const bool enable_checkpointing) { + const bool enable_checkpointing, + const bool measure_only) { ggml_set_scratch(ctx, { 0, 0, nullptr, }); const int n_past = 0; @@ -622,13 +555,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( // KQ_pos - contains the positions struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N); - ggml_allocr_alloc(alloc, KQ_pos); - if (!ggml_allocr_is_measure(alloc)) { - int * data = (int *) KQ_pos->data; - for (int i = 0; i < N; ++i) { - data[i] = n_past + i; - } - } + ggml_set_input(KQ_pos); // rope has so much parameters that we make a custom function for it auto rope = [ctx, KQ_pos, n_rot, n_ctx, rope_freq_base, rope_freq_scale] @@ -683,13 +610,13 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( struct ggml_tensor * attention_norm = add_to_f32(ctx, layer.attention_norm, ggml_mul_mat(ctx, llayer.attention_norm_a, llayer.attention_norm_b)); struct ggml_tensor * ffn_norm = add_to_f32(ctx, layer.ffn_norm, ggml_mul_mat(ctx, llayer.ffn_norm_a, llayer.ffn_norm_b)); - struct ggml_tensor * wq = add_to_f32(ctx, layer.wq, ggml_mul_mat(ctx, llayer.wq_a, llayer.wq_b)); - struct ggml_tensor * wk = add_to_f32(ctx, layer.wk, ggml_mul_mat(ctx, llayer.wk_a, llayer.wk_b)); - struct ggml_tensor * wv = add_to_f32(ctx, layer.wv, ggml_mul_mat(ctx, llayer.wv_a, llayer.wv_b)); - struct ggml_tensor * wo = add_to_f32(ctx, layer.wo, ggml_mul_mat(ctx, llayer.wo_a, llayer.wo_b)); - struct ggml_tensor * w1 = add_to_f32(ctx, layer.w1, ggml_mul_mat(ctx, llayer.w1_a, llayer.w1_b)); - struct ggml_tensor * w2 = add_to_f32(ctx, layer.w2, ggml_mul_mat(ctx, llayer.w2_a, llayer.w2_b)); - struct ggml_tensor * w3 = add_to_f32(ctx, layer.w3, ggml_mul_mat(ctx, llayer.w3_a, llayer.w3_b)); + struct ggml_tensor * wq = add_to_f32(ctx, layer.wq, ggml_mul_mat(ctx, llayer.wq_a, llayer.wq_b)); + struct ggml_tensor * wk = add_to_f32(ctx, layer.wk, ggml_mul_mat(ctx, llayer.wk_a, llayer.wk_b)); + struct ggml_tensor * wv = add_to_f32(ctx, layer.wv, ggml_mul_mat(ctx, llayer.wv_a, llayer.wv_b)); + struct ggml_tensor * wo = add_to_f32(ctx, layer.wo, ggml_mul_mat(ctx, llayer.wo_a, llayer.wo_b)); + struct ggml_tensor * ffn_gate = add_to_f32(ctx, layer.ffn_gate, ggml_mul_mat(ctx, llayer.ffn_gate_a, llayer.ffn_gate_b)); + struct ggml_tensor * ffn_down = add_to_f32(ctx, layer.ffn_down, ggml_mul_mat(ctx, llayer.ffn_down_a, llayer.ffn_down_b)); + struct ggml_tensor * ffn_up = add_to_f32(ctx, layer.ffn_up, ggml_mul_mat(ctx, llayer.ffn_up_a, llayer.ffn_up_b)); struct ggml_tensor * t02 = ggml_rms_norm (ctx, cur, rms_norm_eps); set_name(t02, "t02"); assert_shape_2d(t02, n_embd, N*n_batch); struct ggml_tensor * t03 = ggml_repeat (ctx, attention_norm, t02); set_name(t03, "t03"); assert_shape_2d(t03, n_embd, N*n_batch); @@ -732,11 +659,11 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( struct ggml_tensor * t22 = ggml_rms_norm (ctx, t21, rms_norm_eps); set_name(t22, "t22"); assert_shape_2d(t22, n_embd, N*n_batch); struct ggml_tensor * t23 = ggml_repeat (ctx, ffn_norm, t22); set_name(t23, "t23"); assert_shape_2d(t23, n_embd, N*n_batch); struct ggml_tensor * t24 = ggml_mul (ctx, t23, t22); set_name(t24, "t24"); assert_shape_2d(t24, n_embd, N*n_batch); - struct ggml_tensor * t25 = ggml_mul_mat (ctx, w3, t24); set_name(t25, "t25"); assert_shape_2d(t25, n_ff, N*n_batch); - struct ggml_tensor * t26 = ggml_mul_mat (ctx, w1, t24); set_name(t26, "t26"); assert_shape_2d(t26, n_ff, N*n_batch); + struct ggml_tensor * t25 = ggml_mul_mat (ctx, ffn_up, t24); set_name(t25, "t25"); assert_shape_2d(t25, n_ff, N*n_batch); + struct ggml_tensor * t26 = ggml_mul_mat (ctx, ffn_gate, t24); set_name(t26, "t26"); assert_shape_2d(t26, n_ff, N*n_batch); struct ggml_tensor * t27 = ggml_silu (ctx, t26); set_name(t27, "t27"); assert_shape_2d(t27, n_ff, N*n_batch); struct ggml_tensor * t28 = ggml_mul (ctx, t27, t25); set_name(t28, "t28"); assert_shape_2d(t28, n_ff, N*n_batch); - struct ggml_tensor * t29 = ggml_mul_mat (ctx, w2, t28); set_name(t29, "t29"); assert_shape_2d(t29, n_embd, N*n_batch); + struct ggml_tensor * t29 = ggml_mul_mat (ctx, ffn_down, t28); set_name(t29, "t29"); assert_shape_2d(t29, n_embd, N*n_batch); struct ggml_tensor * t30 = ggml_add (ctx, t29, t21); set_name(t30, "t30"); assert_shape_2d(t30, n_embd, N*n_batch); cur = t30; if (enable_checkpointing) { @@ -780,7 +707,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( // input gradient ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, t36->grad, 1.0f)); GGML_ASSERT(t36->grad->data == NULL && t36->grad->view_src == NULL); - ggml_allocr_alloc(alloc, t36->grad); + ggml_set_input(t36->grad); // KQ_pos ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, KQ_pos, 1.0f)); @@ -796,20 +723,32 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs( ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wk, 1.0f)); ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wv, 1.0f)); ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.wo, 1.0f)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w1, 1.0f)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w2, 1.0f)); - ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.w3, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_gate, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_down, 1.0f)); + ggml_build_forward_expand(gb, ggml_scale_inplace(ctx, layer.ffn_up, 1.0f)); } // allocating checkpoints in one block to reduce memory fragmentation // note: they will be freed in reverse order for (unsigned int i = 0; i < checkpoints.size(); ++i) { if (checkpoints[i]->data == NULL && checkpoints[i]->view_src == NULL) { - ggml_allocr_alloc(alloc, checkpoints[i]); + ggml_set_input(checkpoints[i]); } } - ggml_allocr_alloc_graph(alloc, gb); + if (measure_only) { + ggml_gallocr_reserve(alloc, gb); + } else { + ggml_gallocr_alloc_graph(alloc, gb); + + // set KQ_pos + { + int * data = (int *) KQ_pos->data; + for (int i = 0; i < N; ++i) { + data[i] = n_past + i; + } + } + } // remove the additional nodes and leafs for (int i = n_leafs_before; i < gb->n_leafs; ++i) { @@ -859,9 +798,9 @@ static void load_llama_lora_gguf(struct gguf_context * fctx, struct ggml_context GGUF_GET_KEY(fctx, lora->hparams.n_rank_wv, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_V); GGUF_GET_KEY(fctx, lora->hparams.n_rank_wo, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_ATTN_OUT); GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_norm, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_NORM); - GGUF_GET_KEY(fctx, lora->hparams.n_rank_w1, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_GATE); - GGUF_GET_KEY(fctx, lora->hparams.n_rank_w2, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN); - GGUF_GET_KEY(fctx, lora->hparams.n_rank_w3, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_UP); + GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_gate, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_GATE); + GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_down, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN); + GGUF_GET_KEY(fctx, lora->hparams.n_rank_ffn_up, gguf_get_val_u32, GGUF_TYPE_UINT32, true, LLM_KV_TRAINING_LORA_RANK_FFN_UP); init_lora(model, lora); @@ -886,12 +825,12 @@ static void load_llama_lora_gguf(struct gguf_context * fctx, struct ggml_context copy_tensor_by_name(layer.wo_b, f_ggml_ctx, ggml_get_name(layer.wo_b)); copy_tensor_by_name(layer.ffn_norm_a, f_ggml_ctx, ggml_get_name(layer.ffn_norm_a)); copy_tensor_by_name(layer.ffn_norm_b, f_ggml_ctx, ggml_get_name(layer.ffn_norm_b)); - copy_tensor_by_name(layer.w1_a, f_ggml_ctx, ggml_get_name(layer.w1_a)); - copy_tensor_by_name(layer.w1_b, f_ggml_ctx, ggml_get_name(layer.w1_b)); - copy_tensor_by_name(layer.w2_a, f_ggml_ctx, ggml_get_name(layer.w2_a)); - copy_tensor_by_name(layer.w2_b, f_ggml_ctx, ggml_get_name(layer.w2_b)); - copy_tensor_by_name(layer.w3_a, f_ggml_ctx, ggml_get_name(layer.w3_a)); - copy_tensor_by_name(layer.w3_b, f_ggml_ctx, ggml_get_name(layer.w3_b)); + copy_tensor_by_name(layer.ffn_gate_a, f_ggml_ctx, ggml_get_name(layer.ffn_gate_a)); + copy_tensor_by_name(layer.ffn_gate_b, f_ggml_ctx, ggml_get_name(layer.ffn_gate_b)); + copy_tensor_by_name(layer.ffn_down_a, f_ggml_ctx, ggml_get_name(layer.ffn_down_a)); + copy_tensor_by_name(layer.ffn_down_b, f_ggml_ctx, ggml_get_name(layer.ffn_down_b)); + copy_tensor_by_name(layer.ffn_up_a, f_ggml_ctx, ggml_get_name(layer.ffn_up_a)); + copy_tensor_by_name(layer.ffn_up_b, f_ggml_ctx, ggml_get_name(layer.ffn_up_b)); } } @@ -929,9 +868,9 @@ static void save_llama_lora_gguf(struct gguf_context * fctx, struct my_llama_mod gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_V, lora->hparams.n_rank_wv); gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_ATTN_OUT, lora->hparams.n_rank_wo); gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_NORM, lora->hparams.n_rank_ffn_norm); - gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_GATE, lora->hparams.n_rank_w1); - gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN, lora->hparams.n_rank_w2); - gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_UP, lora->hparams.n_rank_w3); + gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_GATE, lora->hparams.n_rank_ffn_gate); + gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_DOWN, lora->hparams.n_rank_ffn_down); + gguf_set_val_u32(fctx, LLM_KV_TRAINING_LORA_RANK_FFN_UP, lora->hparams.n_rank_ffn_up); gguf_add_tensor(fctx, lora->tok_embeddings_a); gguf_add_tensor(fctx, lora->tok_embeddings_b); @@ -955,12 +894,12 @@ static void save_llama_lora_gguf(struct gguf_context * fctx, struct my_llama_mod gguf_add_tensor(fctx, layer.wo_b); gguf_add_tensor(fctx, layer.ffn_norm_a); gguf_add_tensor(fctx, layer.ffn_norm_b); - gguf_add_tensor(fctx, layer.w1_a); - gguf_add_tensor(fctx, layer.w1_b); - gguf_add_tensor(fctx, layer.w2_a); - gguf_add_tensor(fctx, layer.w2_b); - gguf_add_tensor(fctx, layer.w3_a); - gguf_add_tensor(fctx, layer.w3_b); + gguf_add_tensor(fctx, layer.ffn_gate_a); + gguf_add_tensor(fctx, layer.ffn_gate_b); + gguf_add_tensor(fctx, layer.ffn_down_a); + gguf_add_tensor(fctx, layer.ffn_down_b); + gguf_add_tensor(fctx, layer.ffn_up_a); + gguf_add_tensor(fctx, layer.ffn_up_b); } } @@ -1138,9 +1077,8 @@ static void save_as_llama_lora(const char * filename, struct my_llama_lora * lor return tn_buf.data(); }; - uint32_t LLAMA_FILE_MAGIC_LORA = 0x67676C61; // 'ggla' // write_magic - file.write_u32(LLAMA_FILE_MAGIC_LORA); // magic + file.write_u32(LLAMA_FILE_MAGIC_GGLA); // magic file.write_u32(1); // version // write_hparams file.write_u32(lora->hparams.lora_r); @@ -1166,12 +1104,12 @@ static void save_as_llama_lora(const char * filename, struct my_llama_lora * lor write_tensor(&file, layer.wo_b, tni(LLM_TENSOR_ATTN_OUT, i, ".weight.loraB")); write_tensor(&file, layer.ffn_norm_a, tni(LLM_TENSOR_FFN_NORM, i, ".weight.loraA")); write_tensor(&file, layer.ffn_norm_b, tni(LLM_TENSOR_FFN_NORM, i, ".weight.loraB")); - write_tensor(&file, layer.w1_a, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraA")); - write_tensor(&file, layer.w1_b, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraB")); - write_tensor(&file, layer.w2_a, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraA")); - write_tensor(&file, layer.w2_b, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraB")); - write_tensor(&file, layer.w3_a, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraA")); - write_tensor(&file, layer.w3_b, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraB")); + write_tensor(&file, layer.ffn_gate_a, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraA")); + write_tensor(&file, layer.ffn_gate_b, tni(LLM_TENSOR_FFN_GATE, i, ".weight.loraB")); + write_tensor(&file, layer.ffn_down_a, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraA")); + write_tensor(&file, layer.ffn_down_b, tni(LLM_TENSOR_FFN_DOWN, i, ".weight.loraB")); + write_tensor(&file, layer.ffn_up_a, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraA")); + write_tensor(&file, layer.ffn_up_b, tni(LLM_TENSOR_FFN_UP, i, ".weight.loraB")); } } @@ -1201,9 +1139,9 @@ struct train_params { uint32_t n_rank_wv; uint32_t n_rank_wo; uint32_t n_rank_ffn_norm; - uint32_t n_rank_w1; - uint32_t n_rank_w2; - uint32_t n_rank_w3; + uint32_t n_rank_ffn_gate; + uint32_t n_rank_ffn_down; + uint32_t n_rank_ffn_up; uint32_t n_rank_tok_embeddings; uint32_t n_rank_norm; uint32_t n_rank_output; @@ -1214,9 +1152,9 @@ struct train_params { bool custom_n_rank_wv; bool custom_n_rank_wo; bool custom_n_rank_ffn_norm; - bool custom_n_rank_w1; - bool custom_n_rank_w2; - bool custom_n_rank_w3; + bool custom_n_rank_ffn_gate; + bool custom_n_rank_ffn_down; + bool custom_n_rank_ffn_up; bool custom_n_rank_tok_embeddings; bool custom_n_rank_norm; bool custom_n_rank_output; @@ -1248,9 +1186,9 @@ static struct train_params get_default_train_params() { params.n_rank_wv = 4; params.n_rank_wo = 4; params.n_rank_ffn_norm = 1; - params.n_rank_w1 = 4; - params.n_rank_w2 = 4; - params.n_rank_w3 = 4; + params.n_rank_ffn_gate = 4; + params.n_rank_ffn_down = 4; + params.n_rank_ffn_up = 4; params.n_rank_tok_embeddings = 4; params.n_rank_norm = 1; params.n_rank_output = 4; @@ -1261,9 +1199,9 @@ static struct train_params get_default_train_params() { params.custom_n_rank_wv = false; params.custom_n_rank_wo = false; params.custom_n_rank_ffn_norm = false; - params.custom_n_rank_w1 = false; - params.custom_n_rank_w2 = false; - params.custom_n_rank_w3 = false; + params.custom_n_rank_ffn_gate = false; + params.custom_n_rank_ffn_down = false; + params.custom_n_rank_ffn_up = false; params.custom_n_rank_tok_embeddings = false; params.custom_n_rank_norm = false; params.custom_n_rank_output = false; @@ -1294,9 +1232,9 @@ static void train_print_usage(int argc, char ** argv, const struct train_params fprintf(stderr, " --rank-wk N LORA rank for wk tensor, overrides default rank.\n"); fprintf(stderr, " --rank-wv N LORA rank for wv tensor, overrides default rank.\n"); fprintf(stderr, " --rank-wo N LORA rank for wo tensor, overrides default rank.\n"); - fprintf(stderr, " --rank-w1 N LORA rank for w1 tensor, overrides default rank.\n"); - fprintf(stderr, " --rank-w2 N LORA rank for w2 tensor, overrides default rank.\n"); - fprintf(stderr, " --rank-w3 N LORA rank for w3 tensor, overrides default rank.\n"); + fprintf(stderr, " --rank-ffn_gate N LORA rank for ffn_gate tensor, overrides default rank.\n"); + fprintf(stderr, " --rank-ffn_down N LORA rank for ffn_down tensor, overrides default rank.\n"); + fprintf(stderr, " --rank-ffn_up N LORA rank for ffn_up tensor, overrides default rank.\n"); print_common_train_usage(argc, argv, ¶ms->common); } @@ -1431,27 +1369,27 @@ static bool train_params_parse(int argc, char ** argv, struct train_params * par } params->n_rank_wo = std::stoi(argv[i]); params->custom_n_rank_wo = true; - } else if (arg == "--rank-w1") { + } else if (arg == "--rank-ffn_gate") { if (++i >= argc) { invalid_param = true; break; } - params->n_rank_w1 = std::stoi(argv[i]); - params->custom_n_rank_w1 = true; - } else if (arg == "--rank-w2") { + params->n_rank_ffn_gate = std::stoi(argv[i]); + params->custom_n_rank_ffn_gate = true; + } else if (arg == "--rank-ffn_down") { if (++i >= argc) { invalid_param = true; break; } - params->n_rank_w2 = std::stoi(argv[i]); - params->custom_n_rank_w2 = true; - } else if (arg == "--rank-w3") { + params->n_rank_ffn_down = std::stoi(argv[i]); + params->custom_n_rank_ffn_down = true; + } else if (arg == "--rank-ffn_up") { if (++i >= argc) { invalid_param = true; break; } - params->n_rank_w3 = std::stoi(argv[i]); - params->custom_n_rank_w3 = true; + params->n_rank_ffn_up = std::stoi(argv[i]); + params->custom_n_rank_ffn_up = true; } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); train_print_usage(argc, argv, &default_params); @@ -1514,12 +1452,12 @@ static int64_t get_parameter_count(struct my_llama_lora* lora) { nx += ggml_nelements(layer.wo_b); nx += ggml_nelements(layer.ffn_norm_a); nx += ggml_nelements(layer.ffn_norm_b); - nx += ggml_nelements(layer.w1_a); - nx += ggml_nelements(layer.w1_b); - nx += ggml_nelements(layer.w2_a); - nx += ggml_nelements(layer.w2_b); - nx += ggml_nelements(layer.w3_a); - nx += ggml_nelements(layer.w3_b); + nx += ggml_nelements(layer.ffn_gate_a); + nx += ggml_nelements(layer.ffn_gate_b); + nx += ggml_nelements(layer.ffn_down_a); + nx += ggml_nelements(layer.ffn_down_b); + nx += ggml_nelements(layer.ffn_up_a); + nx += ggml_nelements(layer.ffn_up_b); } return nx; } @@ -1573,9 +1511,9 @@ int main(int argc, char ** argv) { uint32_t n_rank_wv = params.custom_n_rank_wv ? params.n_rank_wv : params.lora_r; uint32_t n_rank_wo = params.custom_n_rank_wo ? params.n_rank_wo : params.lora_r; uint32_t n_rank_ffn_norm = params.custom_n_rank_ffn_norm ? params.n_rank_ffn_norm : 1; - uint32_t n_rank_w1 = params.custom_n_rank_w1 ? params.n_rank_w1 : params.lora_r; - uint32_t n_rank_w2 = params.custom_n_rank_w2 ? params.n_rank_w2 : params.lora_r; - uint32_t n_rank_w3 = params.custom_n_rank_w3 ? params.n_rank_w3 : params.lora_r; + uint32_t n_rank_ffn_gate = params.custom_n_rank_ffn_gate ? params.n_rank_ffn_gate : params.lora_r; + uint32_t n_rank_ffn_down = params.custom_n_rank_ffn_down ? params.n_rank_ffn_down : params.lora_r; + uint32_t n_rank_ffn_up = params.custom_n_rank_ffn_up ? params.n_rank_ffn_up : params.lora_r; uint32_t n_rank_tok_embeddings = params.custom_n_rank_tok_embeddings ? params.n_rank_tok_embeddings : params.lora_r; uint32_t n_rank_norm = params.custom_n_rank_norm ? params.n_rank_norm : 1; uint32_t n_rank_output = params.custom_n_rank_output ? params.n_rank_output : params.lora_r; @@ -1585,9 +1523,9 @@ int main(int argc, char ** argv) { lora.hparams.n_rank_wv = n_rank_wv; lora.hparams.n_rank_wo = n_rank_wo; lora.hparams.n_rank_ffn_norm = n_rank_ffn_norm; - lora.hparams.n_rank_w1 = n_rank_w1; - lora.hparams.n_rank_w2 = n_rank_w2; - lora.hparams.n_rank_w3 = n_rank_w3; + lora.hparams.n_rank_ffn_gate = n_rank_ffn_gate; + lora.hparams.n_rank_ffn_down = n_rank_ffn_down; + lora.hparams.n_rank_ffn_up = n_rank_ffn_up; lora.hparams.n_rank_tok_embeddings = n_rank_tok_embeddings; lora.hparams.n_rank_norm = n_rank_norm; lora.hparams.n_rank_output = n_rank_output; @@ -1628,9 +1566,9 @@ int main(int argc, char ** argv) { || (lora.hparams.n_rank_wv != n_rank_wv) || (lora.hparams.n_rank_wo != n_rank_wo) || (lora.hparams.n_rank_ffn_norm != n_rank_ffn_norm) - || (lora.hparams.n_rank_w1 != n_rank_w1) - || (lora.hparams.n_rank_w2 != n_rank_w2) - || (lora.hparams.n_rank_w3 != n_rank_w3) + || (lora.hparams.n_rank_ffn_gate != n_rank_ffn_gate) + || (lora.hparams.n_rank_ffn_down != n_rank_ffn_down) + || (lora.hparams.n_rank_ffn_up != n_rank_ffn_up) || (lora.hparams.n_rank_tok_embeddings != n_rank_tok_embeddings) || (lora.hparams.n_rank_norm != n_rank_norm) || (lora.hparams.n_rank_output != n_rank_output) @@ -1664,7 +1602,7 @@ int main(int argc, char ** argv) { printf("%s: seen train_samples %llu\n", __func__, (long long unsigned) train->train_samples); printf("%s: seen train_tokens %llu\n", __func__, (long long unsigned) train->train_tokens); printf("%s: completed train_epochs %llu\n", __func__, (long long unsigned) train->train_epochs); - printf("%s: lora_size = %zu bytes (%.1f MB)\n", __func__, (ggml_used_mem(lora.ctx) + lora.data.size()), (float) (ggml_used_mem(lora.ctx) + lora.data.size()) / (1024.0f*1024.0f)); + printf("%s: lora_size = %zu bytes (%.1f MB)\n", __func__, (ggml_used_mem(lora.ctx) + ggml_backend_buffer_get_size(lora.data)), (float) (ggml_used_mem(lora.ctx) + ggml_backend_buffer_get_size(lora.data)) / (1024.0f*1024.0f)); if (params.only_write_lora) { save_train_files_data save_data; @@ -1691,10 +1629,6 @@ int main(int argc, char ** argv) { int n_vocab = model.hparams.n_vocab; int n_batch = params.common.n_batch; - - std::vector mem_input_data; - std::vector mem_compute_data; - // context for input tensors without their data struct ggml_init_params ctx_input_params = { ggml_tensor_overhead() * 2, // mem_size @@ -1707,17 +1641,11 @@ int main(int argc, char ** argv) { struct ggml_tensor * tokens_input = ggml_new_tensor_2d(ctx_input, GGML_TYPE_I32, n_tokens, n_batch); struct ggml_tensor * target_probs = ggml_new_tensor_3d(ctx_input, GGML_TYPE_F32, n_vocab, n_tokens, n_batch); - // measure required memory for input tensors - size_t max_input_size = GGML_PAD(ggml_nbytes(tokens_input), tensor_alignment) + - GGML_PAD(ggml_nbytes(target_probs), tensor_alignment) + - tensor_alignment; - printf("%s: input_size = %zu bytes (%.1f MB)\n", __func__, max_input_size, (float) max_input_size / (1024.0f*1024.0f)); - // allocate input tensors - mem_input_data.resize(max_input_size); - ggml_allocr_t alloc_inps = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment); - ggml_allocr_alloc(alloc_inps, tokens_input); - ggml_allocr_alloc(alloc_inps, target_probs); + // measure required memory for input tensors + ggml_backend_buffer_t input_data = ggml_backend_alloc_ctx_tensors_from_buft(ctx_input, ggml_backend_cpu_buffer_type()); + size_t max_input_size = ggml_backend_buffer_get_size(input_data); + printf("%s: input_size = %zu bytes (%.1f MB)\n", __func__, max_input_size, (float) max_input_size / (1024.0f*1024.0f)); // context for compute tensors without their data const size_t estimated_compute_size_wo_data = ( @@ -1744,7 +1672,7 @@ int main(int argc, char ** argv) { // find best evaluation order for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) { ctx_compute = ggml_init(ctx_compute_params); - ggml_allocr_t alloc = ggml_allocr_new_measure(tensor_alignment); + ggml_gallocr_t alloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type()); gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gf->order = (enum ggml_cgraph_eval_order) order; gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); @@ -1757,14 +1685,15 @@ int main(int argc, char ** argv) { &logits, tokens_input, target_probs, n_tokens, n_batch, params.common.use_flash, - params.common.use_checkpointing + params.common.use_checkpointing, + true ); - size_t max_compute_size = ggml_allocr_max_size(alloc) + tensor_alignment; + size_t max_compute_size = ggml_gallocr_get_buffer_size(alloc, 0); // FIXME: this will still allocate the buffer if (max_compute_size < best_compute_size) { best_compute_size = max_compute_size; best_order = gf->order; } - ggml_allocr_free(alloc); + ggml_gallocr_free(alloc); ggml_free(ctx_compute); } size_t max_compute_size = best_compute_size; @@ -1775,9 +1704,8 @@ int main(int argc, char ** argv) { "invalid"); // allocate compute tensors - mem_compute_data.resize(max_compute_size); ctx_compute = ggml_init(ctx_compute_params); - ggml_allocr_t alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment); + ggml_gallocr_t alloc = ggml_gallocr_new(ggml_backend_cpu_buffer_type()); gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); gf->order = best_order; gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true); @@ -1790,17 +1718,17 @@ int main(int argc, char ** argv) { &logits, tokens_input, target_probs, n_tokens, n_batch, params.common.use_flash, - params.common.use_checkpointing + params.common.use_checkpointing, + false ); - ggml_allocr_free(alloc); - ggml_allocr_free(alloc_inps); - // tokenize data std::vector train_tokens; std::vector train_samples_begin; std::vector train_samples_size; - printf("%s: tokenize training data\n", __func__); + printf("%s: tokenize training data from %s\n", __func__, params.common.fn_train_data); + printf("%s: sample-start: %s\n", __func__, params.common.sample_start.c_str()); + printf("%s: include-sample-start: %s\n", __func__, params.common.include_sample_start ? "true" : "false"); tokenize_file(lctx, params.common.fn_train_data, params.common.sample_start, @@ -1907,6 +1835,8 @@ int main(int argc, char ** argv) { ggml_free(ctx_work); ggml_free(ctx_compute); ggml_free(ctx_input); + ggml_gallocr_free(alloc); + int64_t t1 = ggml_time_ms(); printf("%s: total training time: ", __func__); diff --git a/examples/imatrix/README.md b/examples/imatrix/README.md new file mode 100644 index 000000000..578e8fc27 --- /dev/null +++ b/examples/imatrix/README.md @@ -0,0 +1,32 @@ +# llama.cpp/examples/imatrix + +Compute an importance matrix for a model and given text dataset. Can be used during quantization to enchance the quality of the quantum models. +More information is available here: https://github.com/ggerganov/llama.cpp/pull/4861 + +## Usage + +``` +./imatrix -m -f [-o ] [--verbosity ] + [-ofreq num_chunks] [-ow <0 or 1>] [other common params] +``` + +Here `-m` with a model name and `-f` with a file containing training data (such as e.g. `wiki.train.raw`) are mandatory. +The parameters in square brackets are optional and have the following meaning: +* `-o` (or `--output-file`) specifies the name of the file where the computed data will be stored. If missing `imatrix.dat` is used. +* `--verbosity` specifies the verbosity level. If set to `0`, no output other than the perplexity of the processed chunks will be generated. If set to `1`, each time the results are saved a message is written to `stderr`. If `>=2`, a message is output each time data is collected for any tensor. Default verbosity level is `1`. +* `-ofreq` (or `--output-frequency`) specifies how often the so far computed result is saved to disk. Default is 10 (i.e., every 10 chunks) +* `-ow` (or `--output-weight`) specifies if data will be collected for the `output.weight` tensor. My experience is that it is better to not utilize the importance matrix when quantizing `output.weight`, so this is set to `false` by default. + +For faster computation, make sure to use GPU offloading via the `-ngl` argument + +## Example + +```bash +LLAMA_CUBLAS=1 make -j + +# generate importance matrix (imatrix.dat) +./imatrix -m ggml-model-f16.gguf -f train-data.txt -ngl 99 + +# use the imatrix to perform a Q4_K_M quantization +./quantize --imatrix imatrix.dat ggml-model-f16.gguf ./ggml-model-q4_k_m.gguf q4_k_m +``` diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index 1461bc963..f21bc48f3 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -26,6 +26,7 @@ struct StatParams { std::string ofile = "imatrix.dat"; int n_output_frequency = 10; int verbosity = 1; + int keep_every = 0; bool collect_output_weight = false; }; @@ -33,47 +34,146 @@ class IMatrixCollector { public: IMatrixCollector() = default; void set_parameters(StatParams&& params) { m_params = std::move(params); } - void collect_imatrix(const struct ggml_tensor * src0, const struct ggml_tensor * src1); + bool collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data); void save_imatrix() const; + bool load_imatrix(const char * file_name, bool add); + static bool load_imatrix(const char * file_name, std::unordered_map& imatrix); private: std::unordered_map m_stats; StatParams m_params; std::mutex m_mutex; int m_last_call = 0; + std::vector m_src1_data; + std::vector m_ids; // the expert ids from ggml_mul_mat_id + // + void save_imatrix(const char * file_name) const; + void keep_imatrix(int ncall) const; }; -void IMatrixCollector::collect_imatrix(const struct ggml_tensor * src0, const struct ggml_tensor * src1) { - if (src1->ne[1] < 16 || src1->type != GGML_TYPE_F32) return; - if (!(strncmp(src0->name, "blk.", 4) == 0 || (m_params.collect_output_weight && strcmp(src0->name, "output.weight") == 0))) return; +bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) { + GGML_UNUSED(user_data); + + const struct ggml_tensor * src0 = t->src[0]; + const struct ggml_tensor * src1 = t->src[1]; + + // when ask is true, the scheduler wants to know if we are interested in data from this tensor + // if we return true, a follow-up call will be made with ask=false in which we can do the actual collection + if (ask) { + if (t->op == GGML_OP_MUL_MAT_ID) return true; // collect all indirect matrix multiplications + if (t->op != GGML_OP_MUL_MAT) return false; + if (src1->ne[1] < 16 || src1->type != GGML_TYPE_F32) return false; + if (!(strncmp(src0->name, "blk.", 4) == 0 || (m_params.collect_output_weight && strcmp(src0->name, "output.weight") == 0))) return false; + return true; + } + std::lock_guard lock(m_mutex); - auto& e = m_stats[src0->name]; - if (e.values.empty()) { - e.values.resize(src1->ne[0], 0); + + // copy the data from the GPU memory if needed + const bool is_host = ggml_backend_buffer_is_host(src1->buffer); + + if (!is_host) { + m_src1_data.resize(ggml_nelements(src1)); + ggml_backend_tensor_get(src1, m_src1_data.data(), 0, ggml_nbytes(src1)); } - else if (e.values.size() != (size_t)src1->ne[0]) { - fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", src0->name, (int)e.values.size(), (int)src1->ne[0]); - exit(1); //GGML_ASSERT(false); - } - ++e.ncall; - if (m_params.verbosity > 1) { - printf("%s[%d]: %s, %d x %d, %d\n",__func__,m_last_call,src0->name,(int)src1->ne[0],(int)src1->ne[1],(int)src1->type); - } - for (int row = 0; row < (int)src1->ne[1]; ++row) { - const float * x = (const float *)src1->data + row * src1->ne[0]; - for (int j = 0; j < (int)src1->ne[0]; ++j) { - e.values[j] += x[j]*x[j]; - } - } - if (e.ncall > m_last_call) { - m_last_call = e.ncall; - if (m_last_call % m_params.n_output_frequency == 0) { - save_imatrix(); + + const float * data = is_host ? (const float *) src1->data : m_src1_data.data(); + + if (t->op == GGML_OP_MUL_MAT_ID) { + const int idx = ((int32_t *) t->op_params)[0]; + const int n_as = ((int32_t *) t->op_params)[1]; + + // the top-k selected expert ids are stored in the src0 tensor + // for simplicity, always copy src0 to host, because it is small + // take into account that src0 is not contiguous! + GGML_ASSERT(src0->ne[1] == src1->ne[1]); + GGML_ASSERT(n_as*ggml_nrows(src0)*sizeof(int) == GGML_PAD(ggml_nbytes(src0), n_as*sizeof(int))); + m_ids.resize(ggml_nbytes(src0)/sizeof(int)); + ggml_backend_tensor_get(src0, m_ids.data(), 0, ggml_nbytes(src0)); + + // loop over all possible experts, regardless if they are used or not in the batch + // this is necessary to guarantee equal number of "ncall" for each tensor + for (int ex = 0; ex < n_as; ++ex) { + src0 = t->src[2 + ex]; + auto& e = m_stats[src0->name]; + if (e.values.empty()) { + e.values.resize(src1->ne[0], 0); + } + else if (e.values.size() != (size_t)src1->ne[0]) { + fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", src0->name, (int)e.values.size(), (int)src1->ne[0]); + exit(1); //GGML_ASSERT(false); + } + // NOTE: since we select top-k experts, the number of calls for the expert tensors will be k times larger + // using the following line, we can correct for that if needed + //if (idx == t->src[0]->ne[0] - 1) ++e.ncall; + ++e.ncall; + if (m_params.verbosity > 1) { + printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, src0->name, ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type); + } + for (int row = 0; row < (int)src1->ne[1]; ++row) { + const int excur = m_ids[row*n_as + idx]; + GGML_ASSERT(excur >= 0 && excur < n_as); // sanity check + if (excur != ex) continue; + const float * x = data + row * src1->ne[0]; + for (int j = 0; j < (int)src1->ne[0]; ++j) { + e.values[j] += x[j]*x[j]; + } + } + if (e.ncall > m_last_call) { + m_last_call = e.ncall; + if (m_last_call % m_params.n_output_frequency == 0) { + save_imatrix(); + } + if (m_params.keep_every > 0 && m_last_call%m_params.keep_every == 0) { + keep_imatrix(m_last_call); + } + } + } + } else { + auto& e = m_stats[src0->name]; + if (e.values.empty()) { + e.values.resize(src1->ne[0], 0); + } + else if (e.values.size() != (size_t)src1->ne[0]) { + fprintf(stderr, "Oops: inconsistent size for %s (%d vs %d)\n", src0->name, (int)e.values.size(), (int)src1->ne[0]); + exit(1); //GGML_ASSERT(false); + } + ++e.ncall; + if (m_params.verbosity > 1) { + printf("%s[%d]: %32s, %s, %5d x %5d, %d\n", __func__, m_last_call, src0->name, ggml_op_name(t->op), (int)src1->ne[0], (int)src1->ne[1], (int)src1->type); + } + for (int row = 0; row < (int)src1->ne[1]; ++row) { + const float * x = data + row * src1->ne[0]; + for (int j = 0; j < (int)src1->ne[0]; ++j) { + e.values[j] += x[j]*x[j]; + } + } + if (e.ncall > m_last_call) { + m_last_call = e.ncall; + if (m_last_call % m_params.n_output_frequency == 0) { + save_imatrix(); + } + if (m_params.keep_every > 0 && m_last_call%m_params.keep_every == 0) { + keep_imatrix(m_last_call); + } } } + + return true; } void IMatrixCollector::save_imatrix() const { - const char * fname = m_params.ofile.empty() ? "imatrix.dat" : m_params.ofile.c_str(); + save_imatrix(m_params.ofile.empty() ? "imatrix.dat" : m_params.ofile.c_str()); +} + +void IMatrixCollector::keep_imatrix(int ncall) const { + auto file_name = m_params.ofile; + if (file_name.empty()) file_name = "imatrix.dat"; + file_name += ".at_"; + file_name += std::to_string(ncall); + save_imatrix(file_name.c_str()); +} + +void IMatrixCollector::save_imatrix(const char * fname) const { std::ofstream out(fname, std::ios::binary); int n_entries = m_stats.size(); out.write((const char*)&n_entries, sizeof(n_entries)); @@ -91,10 +191,61 @@ void IMatrixCollector::save_imatrix() const { } } +bool IMatrixCollector::load_imatrix(const char * imatrix_file, std::unordered_map& imatrix_data) { + std::ifstream in(imatrix_file, std::ios::binary); + if (!in) { + printf("%s: failed to open %s\n",__func__,imatrix_file); + return false; + } + int n_entries; + in.read((char*)&n_entries, sizeof(n_entries)); + if (in.fail() || n_entries < 1) { + printf("%s: no data in file %s\n", __func__, imatrix_file); + return false; + } + for (int i = 0; i < n_entries; ++i) { + int len; in.read((char *)&len, sizeof(len)); + std::vector name_as_vec(len+1); + in.read((char *)name_as_vec.data(), len); + if (in.fail()) { + printf("%s: failed reading name for entry %d from %s\n",__func__,i+1,imatrix_file); + return false; + } + name_as_vec[len] = 0; + std::string name{name_as_vec.data()}; + auto& e = imatrix_data[std::move(name)]; + int ncall; + in.read((char*)&ncall, sizeof(ncall)); + int nval; + in.read((char *)&nval, sizeof(nval)); + if (in.fail() || nval < 1) { + printf("%s: failed reading number of values for entry %d\n",__func__,i); + imatrix_data = {}; + return false; + } + e.values.resize(nval); + in.read((char*)e.values.data(), nval*sizeof(float)); + if (in.fail()) { + printf("%s: failed reading data for entry %d\n",__func__,i); + imatrix_data = {}; + return false; + } + e.ncall = ncall; + } + return true; +} + +bool IMatrixCollector::load_imatrix(const char * file_name, bool add) { + if (!add) { + m_stats.clear(); + } + return load_imatrix(file_name, m_stats); +} + static IMatrixCollector g_collector; -static void ik_collect_imatrix(const struct ggml_tensor * src0, const struct ggml_tensor * src1) { - g_collector.collect_imatrix(src0, src1); +static bool ik_collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) { + return g_collector.collect_imatrix(t, ask, user_data); } @@ -171,7 +322,7 @@ static void process_logits( } } -static bool compute_imatrix(llama_context * ctx, const gpt_params & params) { +static bool compute_imatrix(llama_context * ctx, const gpt_params & params, bool compute_ppl, int from_chunk) { const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx)); const int n_ctx = llama_n_ctx(ctx); @@ -184,6 +335,15 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) { auto tim2 = std::chrono::high_resolution_clock::now(); fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast(tim2-tim1).count()); + if (from_chunk > 0) { + if (size_t((from_chunk + 2)*n_ctx) >= tokens.size()) { + fprintf(stderr, "%s: there will be not enough tokens left after removing %d chunks\n", __func__, from_chunk); + return false; + } + fprintf(stderr, "%s: removing initial %d chunks (%d tokens)\n", __func__, from_chunk, from_chunk*n_ctx); + tokens.erase(tokens.begin(), tokens.begin() + from_chunk*n_ctx); + } + if (int(tokens.size()) < 2*n_ctx) { fprintf(stderr, "%s: you need at least %d tokens for a context of %d tokens\n",__func__,2*n_ctx, n_ctx); @@ -192,10 +352,12 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) { } std::vector logit_history; - logit_history.resize(tokens.size()); - std::vector prob_history; - prob_history.resize(tokens.size()); + + if (compute_ppl) { + logit_history.resize(tokens.size()); + prob_history.resize(tokens.size()); + } const int n_chunk_max = tokens.size() / n_ctx; @@ -211,12 +373,17 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) { std::vector workers(std::thread::hardware_concurrency() - 1); + const int num_batches = (n_ctx + n_batch - 1) / n_batch; + + std::vector logits; + if (compute_ppl && num_batches > 1) { + logits.reserve((size_t)n_ctx * n_vocab); + } + for (int i = 0; i < n_chunk; ++i) { const int start = i * n_ctx; const int end = start + n_ctx; - const int num_batches = (n_ctx + n_batch - 1) / n_batch; - std::vector logits; const auto t_start = std::chrono::high_resolution_clock::now(); @@ -244,8 +411,10 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) { // restore the original token in case it was set to BOS tokens[batch_start] = token_org; - const auto * batch_logits = llama_get_logits(ctx); - logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab); + if (compute_ppl && num_batches > 1) { + const auto * batch_logits = llama_get_logits(ctx); + logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab); + } } const auto t_end = std::chrono::high_resolution_clock::now(); @@ -261,25 +430,32 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) { fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0); } - const int first = n_ctx/2; - process_logits(n_vocab, logits.data() + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first, - workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first); - count += n_ctx - first - 1; + if (compute_ppl) { + const int first = n_ctx/2; + const auto all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx); + process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first, + workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first); + count += n_ctx - first - 1; - printf("[%d]%.4lf,", i + 1, std::exp(nll / count)); - fflush(stdout); + printf("[%d]%.4lf,", i + 1, std::exp(nll / count)); + fflush(stdout); + + logits.clear(); + } } printf("\n"); - nll2 /= count; - nll /= count; - const double ppl = exp(nll); - nll2 -= nll * nll; - if (nll2 > 0) { - nll2 = sqrt(nll2/(count-1)); - printf("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl); - } else { - printf("Unexpected negative standard deviation of log(prob)\n"); + if (compute_ppl) { + nll2 /= count; + nll /= count; + const double ppl = exp(nll); + nll2 -= nll * nll; + if (nll2 > 0) { + nll2 = sqrt(nll2/(count-1)); + printf("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl); + } else { + printf("Unexpected negative standard deviation of log(prob)\n"); + } } return true; @@ -288,6 +464,10 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) { int main(int argc, char ** argv) { StatParams sparams; + std::string prev_result_file; + std::string combine_files; + bool compute_ppl = true; + int from_chunk = 0; std::vector args; args.push_back(argv[0]); int iarg = 1; @@ -304,12 +484,66 @@ int main(int argc, char ** argv) { } else if (arg == "--verbosity") { sparams.verbosity = std::stoi(argv[++iarg]); + } else if (arg == "--no-ppl") { + compute_ppl = false; + } else if (arg == "--keep-imatrix") { + sparams.keep_every = std::stoi(argv[++iarg]); + } else if (arg == "--continue-from") { + prev_result_file = argv[++iarg]; + } else if (arg == "--combine") { + combine_files = argv[++iarg]; + } + else if (arg == "--from-chunk") { + from_chunk = std::stoi(argv[++iarg]); } else { args.push_back(argv[iarg]); } } if (iarg < argc) { - args.push_back(argv[iarg]); + std::string arg{argv[iarg]}; + if (arg == "--no-ppl") { + compute_ppl = false; + } else { + args.push_back(argv[iarg]); + } + } + + g_collector.set_parameters(std::move(sparams)); + + if (!combine_files.empty()) { + std::vector files; + size_t pos = 0; + while (true) { + auto new_pos = combine_files.find(',', pos); + if (new_pos != std::string::npos) { + files.emplace_back(combine_files.substr(pos, new_pos - pos)); + pos = new_pos + 1; + } else { + files.emplace_back(combine_files.substr(pos)); + break; + } + } + if (files.size() < 2) { + fprintf(stderr, "You must provide at least two comma separated files to use --combine\n"); + return 1; + } + printf("Combining the following %d files\n", int(files.size())); + for (auto& file : files) { + printf(" %s\n", file.c_str()); + if (!g_collector.load_imatrix(file.c_str(), true)) { + fprintf(stderr, "Failed to load %s\n", file.c_str()); + return 1; + } + } + g_collector.save_imatrix(); + return 0; + } + + if (!prev_result_file.empty()) { + if (!g_collector.load_imatrix(prev_result_file.c_str(), false)) { + fprintf(stderr, "=============== Failed to load %s\n", prev_result_file.c_str()); + return 1; + } } gpt_params params; @@ -318,10 +552,6 @@ int main(int argc, char ** argv) { return 1; } - g_collector.set_parameters(std::move(sparams)); - - ggml_set_imatrix_collection(ik_collect_imatrix); - params.logits_all = true; params.n_batch = std::min(params.n_batch, params.n_ctx); @@ -338,18 +568,30 @@ int main(int argc, char ** argv) { params.prompt = gpt_random_prompt(rng); } - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); - llama_model * model; - llama_context * ctx; + llama_model_params mparams = llama_model_params_from_gpt_params(params); - // load the model and apply lora adapter, if any - std::tie(model, ctx) = llama_init_from_gpt_params(params); + llama_model * model = llama_load_model_from_file(params.model.c_str(), mparams); if (model == NULL) { fprintf(stderr, "%s: error: unable to load model\n", __func__); return 1; } + llama_context_params cparams = llama_context_params_from_gpt_params(params); + + // pass the callback to the backend scheduler + // it will be executed for each node during the graph computation + cparams.cb_eval = ik_collect_imatrix; + cparams.cb_eval_user_data = NULL; + + llama_context * ctx = llama_new_context_with_model(model, cparams); + if (ctx == NULL) { + fprintf(stderr, "%s: error: unable to create context\n", __func__); + return 1; + } + const int n_ctx_train = llama_n_ctx_train(model); if (params.n_ctx > n_ctx_train) { fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n", @@ -362,7 +604,7 @@ int main(int argc, char ** argv) { fprintf(stderr, "%s\n", get_system_info(params).c_str()); } - bool OK = compute_imatrix(ctx, params); + bool OK = compute_imatrix(ctx, params, compute_ppl, from_chunk); if (!OK) { return 1; } diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index 4a7827876..92c67b7cf 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -202,7 +202,8 @@ int main(int argc, char ** argv) { std::mt19937 rng(params.seed); LOG("%s: llama backend init\n", __func__); - llama_backend_init(params.numa); + llama_backend_init(); + llama_numa_init(params.numa); llama_model * model; llama_context * ctx; @@ -241,7 +242,7 @@ int main(int argc, char ** argv) { LOG("add_bos: %d\n", add_bos); bool suff_rm_leading_spc = params.escape; - if (suff_rm_leading_spc && params.input_suffix.find_first_of(" ") == 0 && params.input_suffix.size() > 1) { + if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) { params.input_suffix.erase(0, 1); suff_rm_leading_spc = false; } diff --git a/examples/llama-bench/README.md b/examples/llama-bench/README.md index d02824bfa..374e40a7d 100644 --- a/examples/llama-bench/README.md +++ b/examples/llama-bench/README.md @@ -23,19 +23,23 @@ usage: ./llama-bench [options] options: -h, --help - -m, --model (default: models/7B/ggml-model-q4_0.gguf) - -p, --n-prompt (default: 512) - -n, --n-gen (default: 128) - -b, --batch-size (default: 512) - --memory-f32 <0|1> (default: 0) - -t, --threads (default: 16) - -ngl N, --n-gpu-layers (default: 99) - -mg i, --main-gpu (default: 0) - -mmq, --mul-mat-q <0|1> (default: 1) - -ts, --tensor_split - -r, --repetitions (default: 5) - -o, --output (default: md) - -v, --verbose (default: 0) + -m, --model (default: models/7B/ggml-model-q4_0.gguf) + -p, --n-prompt (default: 512) + -n, --n-gen (default: 128) + -b, --batch-size (default: 512) + -ctk , --cache-type-k (default: f16) + -ctv , --cache-type-v (default: f16) + -t, --threads (default: 112) + -ngl, --n-gpu-layers (default: 99) + -sm, --split-mode (default: layer) + -mg, --main-gpu (default: 0) + -nkvo, --no-kv-offload <0|1> (default: 0) + -mmp, --mmap <0|1> (default: 1) + -mmq, --mul-mat-q <0|1> (default: 1) + -ts, --tensor_split (default: 0) + -r, --repetitions (default: 5) + -o, --output (default: md) + -v, --verbose (default: 0) Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times. ``` @@ -51,6 +55,10 @@ Each test is repeated the number of times given by `-r`, and the results are ave For a description of the other options, see the [main example](../main/README.md). +Note: + +- When using SYCL backend, there would be hang issue in some cases. Please set `--mmp 0`. + ## Examples ### Text generation with different models diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 7f7186cde..11410f8ae 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -20,6 +20,7 @@ #include "llama.h" #include "common.h" #include "ggml-cuda.h" +#include "ggml-sycl.h" // utils static uint64_t get_time_ns() { @@ -120,6 +121,22 @@ static std::string get_gpu_info() { id += "/"; } } +#endif +#ifdef GGML_USE_SYCL + int device_list[GGML_SYCL_MAX_DEVICES]; + ggml_sycl_get_gpu_list(device_list, GGML_SYCL_MAX_DEVICES); + + for (int i = 0; i < GGML_SYCL_MAX_DEVICES; i++) { + if (device_list[i] >0 ){ + char buf[128]; + ggml_sycl_get_device_description(i, buf, sizeof(buf)); + id += buf; + id += "/"; + } + } + if (id.length() >2 ) { + id.pop_back(); + } #endif // TODO: other backends return id; @@ -128,6 +145,25 @@ static std::string get_gpu_info() { // command line params enum output_formats {CSV, JSON, MARKDOWN, SQL}; +static const char * output_format_str(output_formats format) { + switch (format) { + case CSV: return "csv"; + case JSON: return "json"; + case MARKDOWN: return "md"; + case SQL: return "sql"; + default: GGML_ASSERT(!"invalid output format"); + } +} + +static const char * split_mode_str(llama_split_mode mode) { + switch (mode) { + case LLAMA_SPLIT_NONE: return "none"; + case LLAMA_SPLIT_LAYER: return "layer"; + case LLAMA_SPLIT_ROW: return "row"; + default: GGML_ASSERT(!"invalid split mode"); + } +} + struct cmd_params { std::vector model; std::vector n_prompt; @@ -137,10 +173,12 @@ struct cmd_params { std::vector type_v; std::vector n_threads; std::vector n_gpu_layers; + std::vector split_mode; std::vector main_gpu; std::vector no_kv_offload; std::vector mul_mat_q; - std::vector> tensor_split; + std::vector> tensor_split; + std::vector use_mmap; int reps; bool verbose; output_formats output_format; @@ -155,10 +193,12 @@ static const cmd_params cmd_params_defaults = { /* type_v */ {GGML_TYPE_F16}, /* n_threads */ {get_num_physical_cores()}, /* n_gpu_layers */ {99}, + /* split_mode */ {LLAMA_SPLIT_LAYER}, /* main_gpu */ {0}, /* no_kv_offload */ {false}, /* mul_mat_q */ {true}, - /* tensor_split */ {{}}, + /* tensor_split */ {std::vector(llama_max_devices(), 0.0f)}, + /* use_mmap */ {true}, /* reps */ 5, /* verbose */ false, /* output_format */ MARKDOWN @@ -169,21 +209,23 @@ static void print_usage(int /* argc */, char ** argv) { printf("\n"); printf("options:\n"); printf(" -h, --help\n"); - printf(" -m, --model (default: %s)\n", join(cmd_params_defaults.model, ",").c_str()); - printf(" -p, --n-prompt (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str()); - printf(" -n, --n-gen (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str()); - printf(" -b, --batch-size (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str()); - printf(" -ctk , --cache-type-k (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str()); - printf(" -ctv , --cache-type-v (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str()); - printf(" -t, --threads (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str()); - printf(" -ngl, --n-gpu-layers (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str()); - printf(" -mg, --main-gpu (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str()); - printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str()); - printf(" -mmq, --mul-mat-q <0|1> (default: %s)\n", join(cmd_params_defaults.mul_mat_q, ",").c_str()); - printf(" -ts, --tensor_split \n"); - printf(" -r, --repetitions (default: %d)\n", cmd_params_defaults.reps); - printf(" -o, --output (default: %s)\n", cmd_params_defaults.output_format == CSV ? "csv" : cmd_params_defaults.output_format == JSON ? "json" : cmd_params_defaults.output_format == MARKDOWN ? "md" : "sql"); - printf(" -v, --verbose (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0"); + printf(" -m, --model (default: %s)\n", join(cmd_params_defaults.model, ",").c_str()); + printf(" -p, --n-prompt (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str()); + printf(" -n, --n-gen (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str()); + printf(" -b, --batch-size (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str()); + printf(" -ctk , --cache-type-k (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str()); + printf(" -ctv , --cache-type-v (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str()); + printf(" -t, --threads (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str()); + printf(" -ngl, --n-gpu-layers (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str()); + printf(" -sm, --split-mode (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str()); + printf(" -mg, --main-gpu (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str()); + printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str()); + printf(" -mmp, --mmap <0|1> (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str()); + printf(" -mmq, --mul-mat-q <0|1> (default: %s)\n", join(cmd_params_defaults.mul_mat_q, ",").c_str()); + printf(" -ts, --tensor_split (default: 0)\n"); + printf(" -r, --repetitions (default: %d)\n", cmd_params_defaults.reps); + printf(" -o, --output (default: %s)\n", output_format_str(cmd_params_defaults.output_format)); + printf(" -v, --verbose (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0"); printf("\n"); printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n"); } @@ -306,6 +348,28 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { } auto p = split(argv[i], split_delim); params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end()); + } else if (arg == "-sm" || arg == "--split-mode") { + if (++i >= argc) { + invalid_param = true; + break; + } + auto p = split(argv[i], split_delim); + std::vector modes; + for (const auto & m : p) { + llama_split_mode mode; + if (m == "none") { + mode = LLAMA_SPLIT_NONE; + } else if (m == "layer") { + mode = LLAMA_SPLIT_LAYER; + } else if (m == "row") { + mode = LLAMA_SPLIT_ROW; + } else { + invalid_param = true; + break; + } + modes.push_back(mode); + } + params.split_mode.insert(params.split_mode.end(), modes.begin(), modes.end()); } else if (arg == "-mg" || arg == "--main-gpu") { if (++i >= argc) { invalid_param = true; @@ -326,6 +390,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { } auto p = split(argv[i], split_delim); params.mul_mat_q.insert(params.mul_mat_q.end(), p.begin(), p.end()); + } else if (arg == "-mmp" || arg == "--mmap") { + if (++i >= argc) { + invalid_param = true; + break; + } + auto p = split(argv[i], split_delim); + params.use_mmap.insert(params.use_mmap.end(), p.begin(), p.end()); } else if (arg == "-ts" || arg == "--tensor-split") { if (++i >= argc) { invalid_param = true; @@ -336,10 +407,10 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { const std::regex regex{R"([;/]+)"}; std::sregex_token_iterator it{ts.begin(), ts.end(), regex, -1}; std::vector split_arg{it, {}}; - GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES); + GGML_ASSERT(split_arg.size() <= llama_max_devices()); - std::array tensor_split; - for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) { + std::vector tensor_split(llama_max_devices()); + for (size_t i = 0; i < llama_max_devices(); ++i) { if (i < split_arg.size()) { tensor_split[i] = std::stof(split_arg[i]); } else { @@ -392,10 +463,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; } if (params.type_v.empty()) { params.type_v = cmd_params_defaults.type_v; } if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; } + if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; } if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; } if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; } if (params.mul_mat_q.empty()) { params.mul_mat_q = cmd_params_defaults.mul_mat_q; } if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; } + if (params.use_mmap.empty()) { params.use_mmap = cmd_params_defaults.use_mmap; } if (params.n_threads.empty()) { params.n_threads = cmd_params_defaults.n_threads; } return params; @@ -410,17 +483,21 @@ struct cmd_params_instance { ggml_type type_v; int n_threads; int n_gpu_layers; + llama_split_mode split_mode; int main_gpu; bool no_kv_offload; bool mul_mat_q; - std::array tensor_split; + std::vector tensor_split; + bool use_mmap; llama_model_params to_llama_mparams() const { llama_model_params mparams = llama_model_default_params(); mparams.n_gpu_layers = n_gpu_layers; + mparams.split_mode = split_mode; mparams.main_gpu = main_gpu; mparams.tensor_split = tensor_split.data(); + mparams.use_mmap = use_mmap; return mparams; } @@ -428,7 +505,9 @@ struct cmd_params_instance { bool equal_mparams(const cmd_params_instance & other) const { return model == other.model && n_gpu_layers == other.n_gpu_layers && + split_mode == other.split_mode && main_gpu == other.main_gpu && + use_mmap == other.use_mmap && tensor_split == other.tensor_split; } @@ -446,47 +525,16 @@ struct cmd_params_instance { } }; -static std::vector get_cmd_params_instances_int(const cmd_params & params, int n_gen, int n_prompt) { - std::vector instances; - - for (const auto & m : params.model) - for (const auto & nl : params.n_gpu_layers) - for (const auto & mg : params.main_gpu) - for (const auto & ts : params.tensor_split) - for (const auto & nb : params.n_batch) - for (const auto & tk : params.type_k) - for (const auto & tv : params.type_v) - for (const auto & mmq : params.mul_mat_q) - for (const auto & nkvo : params.no_kv_offload) - for (const auto & nt : params.n_threads) { - cmd_params_instance instance = { - /* .model = */ m, - /* .n_prompt = */ n_prompt, - /* .n_gen = */ n_gen, - /* .n_batch = */ nb, - /* .type_k = */ tk, - /* .type_v = */ tv, - /* .n_threads = */ nt, - /* .n_gpu_layers = */ nl, - /* .main_gpu = */ mg, - /* .no_kv_offload= */ nkvo, - /* .mul_mat_q = */ mmq, - /* .tensor_split = */ ts, - }; - instances.push_back(instance); - } - return instances; -} - static std::vector get_cmd_params_instances(const cmd_params & params) { std::vector instances; -#if 1 // this ordering minimizes the number of times that each model needs to be reloaded for (const auto & m : params.model) for (const auto & nl : params.n_gpu_layers) + for (const auto & sm : params.split_mode) for (const auto & mg : params.main_gpu) for (const auto & ts : params.tensor_split) + for (const auto & mmp : params.use_mmap) for (const auto & nb : params.n_batch) for (const auto & tk : params.type_k) for (const auto & tv : params.type_v) @@ -506,10 +554,12 @@ static std::vector get_cmd_params_instances(const cmd_param /* .type_v = */ tv, /* .n_threads = */ nt, /* .n_gpu_layers = */ nl, + /* .split_mode = */ sm, /* .main_gpu = */ mg, /* .no_kv_offload= */ nkvo, /* .mul_mat_q = */ mmq, /* .tensor_split = */ ts, + /* .use_mmap = */ mmp, }; instances.push_back(instance); } @@ -527,32 +577,16 @@ static std::vector get_cmd_params_instances(const cmd_param /* .type_v = */ tv, /* .n_threads = */ nt, /* .n_gpu_layers = */ nl, + /* .split_mode = */ sm, /* .main_gpu = */ mg, /* .no_kv_offload= */ nkvo, /* .mul_mat_q = */ mmq, /* .tensor_split = */ ts, + /* .use_mmap = */ mmp, }; instances.push_back(instance); } } -#else - // this ordering separates the prompt and generation tests - for (const auto & n_prompt : params.n_prompt) { - if (n_prompt == 0) { - continue; - } - auto instances_prompt = get_cmd_params_instances_int(params, 0, n_prompt); - instances.insert(instances.end(), instances_prompt.begin(), instances_prompt.end()); - } - - for (const auto & n_gen : params.n_gen) { - if (n_gen == 0) { - continue; - } - auto instances_gen = get_cmd_params_instances_int(params, n_gen, 0); - instances.insert(instances.end(), instances_gen.begin(), instances_gen.end()); - } -#endif return instances; } @@ -562,7 +596,10 @@ struct test { static const int build_number; static const bool cuda; static const bool opencl; + static const bool vulkan; + static const bool kompute; static const bool metal; + static const bool sycl; static const bool gpu_blas; static const bool blas; static const std::string cpu_info; @@ -576,10 +613,12 @@ struct test { ggml_type type_k; ggml_type type_v; int n_gpu_layers; + llama_split_mode split_mode; int main_gpu; bool no_kv_offload; bool mul_mat_q; - std::array tensor_split; + std::vector tensor_split; + bool use_mmap; int n_prompt; int n_gen; std::string test_time; @@ -597,10 +636,12 @@ struct test { type_k = inst.type_k; type_v = inst.type_v; n_gpu_layers = inst.n_gpu_layers; + split_mode = inst.split_mode; main_gpu = inst.main_gpu; no_kv_offload = inst.no_kv_offload; mul_mat_q = inst.mul_mat_q; tensor_split = inst.tensor_split; + use_mmap = inst.use_mmap; n_prompt = inst.n_prompt; n_gen = inst.n_gen; // RFC 3339 date-time format @@ -641,27 +682,38 @@ struct test { if (opencl) { return "OpenCL"; } + if (vulkan) { + return "Vulkan"; + } + if (kompute) { + return "Kompute"; + } if (metal) { return "Metal"; } + if (sycl) { + return GGML_SYCL_NAME; + } if (gpu_blas) { return "GPU BLAS"; } if (blas) { return "BLAS"; } + return "CPU"; } static const std::vector & get_fields() { static const std::vector fields = { "build_commit", "build_number", - "cuda", "opencl", "metal", "gpu_blas", "blas", + "cuda", "opencl", "vulkan", "kompute", "metal", "sycl", "gpu_blas", "blas", "cpu_info", "gpu_info", "model_filename", "model_type", "model_size", "model_n_params", "n_batch", "n_threads", "type_k", "type_v", - "n_gpu_layers", "main_gpu", "no_kv_offload", - "mul_mat_q", "tensor_split", + "n_gpu_layers", "split_mode", + "main_gpu", "no_kv_offload", + "mul_mat_q", "tensor_split", "use_mmap", "n_prompt", "n_gen", "test_time", "avg_ns", "stddev_ns", "avg_ts", "stddev_ts" @@ -679,8 +731,9 @@ struct test { field == "avg_ns" || field == "stddev_ns") { return INT; } - if (field == "cuda" || field == "opencl" || field == "metal" || field == "gpu_blas" || field == "blas" || - field == "f16_kv" || field == "no_kv_offload" || field == "mul_mat_q") { + if (field == "cuda" || field == "opencl" || field == "vulkan" || field == "kompute" || field == "metal" || + field == "gpu_blas" || field == "blas" || field == "sycl" ||field == "f16_kv" || field == "no_kv_offload" || + field == "mul_mat_q" || field == "use_mmap") { return BOOL; } if (field == "avg_ts" || field == "stddev_ts") { @@ -692,7 +745,7 @@ struct test { std::vector get_values() const { std::string tensor_split_str; int max_nonzero = 0; - for (int i = 0; i < LLAMA_MAX_DEVICES; i++) { + for (size_t i = 0; i < llama_max_devices(); i++) { if (tensor_split[i] > 0) { max_nonzero = i; } @@ -707,12 +760,14 @@ struct test { } std::vector values = { build_commit, std::to_string(build_number), - std::to_string(cuda), std::to_string(opencl), std::to_string(metal), std::to_string(gpu_blas), std::to_string(blas), + std::to_string(cuda), std::to_string(opencl), std::to_string(vulkan), std::to_string(vulkan), + std::to_string(metal), std::to_string(sycl), std::to_string(gpu_blas), std::to_string(blas), cpu_info, gpu_info, model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params), std::to_string(n_batch), std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v), - std::to_string(n_gpu_layers), std::to_string(main_gpu), std::to_string(no_kv_offload), - std::to_string(mul_mat_q), tensor_split_str, + std::to_string(n_gpu_layers), split_mode_str(split_mode), + std::to_string(main_gpu), std::to_string(no_kv_offload), + std::to_string(mul_mat_q), tensor_split_str, std::to_string(use_mmap), std::to_string(n_prompt), std::to_string(n_gen), test_time, std::to_string(avg_ns()), std::to_string(stdev_ns()), std::to_string(avg_ts()), std::to_string(stdev_ts()) @@ -734,9 +789,12 @@ const std::string test::build_commit = LLAMA_COMMIT; const int test::build_number = LLAMA_BUILD_NUMBER; const bool test::cuda = !!ggml_cpu_has_cublas(); const bool test::opencl = !!ggml_cpu_has_clblast(); +const bool test::vulkan = !!ggml_cpu_has_vulkan(); +const bool test::kompute = !!ggml_cpu_has_kompute(); const bool test::metal = !!ggml_cpu_has_metal(); const bool test::gpu_blas = !!ggml_cpu_has_gpublas(); const bool test::blas = !!ggml_cpu_has_blas(); +const bool test::sycl = !!ggml_cpu_has_sycl(); const std::string test::cpu_info = get_cpu_info(); const std::string test::gpu_info = get_gpu_info(); @@ -867,6 +925,9 @@ struct markdown_printer : public printer { if (field == "n_gpu_layers") { return "ngl"; } + if (field == "split_mode") { + return "sm"; + } if (field == "n_threads") { return "threads"; } @@ -876,6 +937,9 @@ struct markdown_printer : public printer { if (field == "no_kv_offload") { return "nkvo"; } + if (field == "use_mmap") { + return "mmap"; + } if (field == "tensor_split") { return "ts"; } @@ -884,40 +948,46 @@ struct markdown_printer : public printer { void print_header(const cmd_params & params) override { // select fields to print - fields.push_back("model"); - fields.push_back("size"); - fields.push_back("params"); - fields.push_back("backend"); + fields.emplace_back("model"); + fields.emplace_back("size"); + fields.emplace_back("params"); + fields.emplace_back("backend"); bool is_cpu_backend = test::get_backend() == "CPU" || test::get_backend() == "BLAS"; if (!is_cpu_backend) { - fields.push_back("n_gpu_layers"); + fields.emplace_back("n_gpu_layers"); } if (params.n_threads.size() > 1 || params.n_threads != cmd_params_defaults.n_threads || is_cpu_backend) { - fields.push_back("n_threads"); + fields.emplace_back("n_threads"); } if (params.n_batch.size() > 1 || params.n_batch != cmd_params_defaults.n_batch) { - fields.push_back("n_batch"); + fields.emplace_back("n_batch"); } if (params.type_k.size() > 1 || params.type_k != cmd_params_defaults.type_k) { - fields.push_back("type_k"); + fields.emplace_back("type_k"); } if (params.type_v.size() > 1 || params.type_v != cmd_params_defaults.type_v) { - fields.push_back("type_v"); + fields.emplace_back("type_v"); } if (params.main_gpu.size() > 1 || params.main_gpu != cmd_params_defaults.main_gpu) { - fields.push_back("main_gpu"); + fields.emplace_back("main_gpu"); + } + if (params.split_mode.size() > 1 || params.split_mode != cmd_params_defaults.split_mode) { + fields.emplace_back("split_mode"); } if (params.mul_mat_q.size() > 1 || params.mul_mat_q != cmd_params_defaults.mul_mat_q) { - fields.push_back("mul_mat_q"); + fields.emplace_back("mul_mat_q"); } if (params.no_kv_offload.size() > 1 || params.no_kv_offload != cmd_params_defaults.no_kv_offload) { - fields.push_back("no_kv_offload"); + fields.emplace_back("no_kv_offload"); } if (params.tensor_split.size() > 1 || params.tensor_split != cmd_params_defaults.tensor_split) { - fields.push_back("tensor_split"); + fields.emplace_back("tensor_split"); } - fields.push_back("test"); - fields.push_back("t/s"); + if (params.use_mmap.size() > 1 || params.use_mmap != cmd_params_defaults.use_mmap) { + fields.emplace_back("use_mmap"); + } + fields.emplace_back("test"); + fields.emplace_back("t/s"); fprintf(fout, "|"); for (const auto & field : fields) { @@ -1081,8 +1151,7 @@ int main(int argc, char ** argv) { if (!params.verbose) { llama_log_set(llama_null_log_callback, NULL); } - bool numa = false; - llama_backend_init(numa); + llama_backend_init(); // initialize printer std::unique_ptr p; diff --git a/examples/llama.android/.gitignore b/examples/llama.android/.gitignore new file mode 100644 index 000000000..347e252ef --- /dev/null +++ b/examples/llama.android/.gitignore @@ -0,0 +1,33 @@ +# Gradle files +.gradle/ +build/ + +# Local configuration file (sdk path, etc) +local.properties + +# Log/OS Files +*.log + +# Android Studio generated files and folders +captures/ +.externalNativeBuild/ +.cxx/ +*.apk +output.json + +# IntelliJ +*.iml +.idea/ +misc.xml +deploymentTargetDropDown.xml +render.experimental.xml + +# Keystore files +*.jks +*.keystore + +# Google Services (e.g. APIs or Firebase) +google-services.json + +# Android Profiling +*.hprof diff --git a/examples/llama.android/README.md b/examples/llama.android/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/examples/llama.android/app/.gitignore b/examples/llama.android/app/.gitignore new file mode 100644 index 000000000..796b96d1c --- /dev/null +++ b/examples/llama.android/app/.gitignore @@ -0,0 +1 @@ +/build diff --git a/examples/llama.android/app/proguard-rules.pro b/examples/llama.android/app/proguard-rules.pro new file mode 100644 index 000000000..f1b424510 --- /dev/null +++ b/examples/llama.android/app/proguard-rules.pro @@ -0,0 +1,21 @@ +# Add project specific ProGuard rules here. +# You can control the set of applied configuration files using the +# proguardFiles setting in build.gradle. +# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# If your project uses WebView with JS, uncomment the following +# and specify the fully qualified class name to the JavaScript interface +# class: +#-keepclassmembers class fqcn.of.javascript.interface.for.webview { +# public *; +#} + +# Uncomment this to preserve the line number information for +# debugging stack traces. +#-keepattributes SourceFile,LineNumberTable + +# If you keep the line number information, uncomment this to +# hide the original source file name. +#-renamesourcefileattribute SourceFile diff --git a/examples/llama.android/app/src/main/AndroidManifest.xml b/examples/llama.android/app/src/main/AndroidManifest.xml new file mode 100644 index 000000000..41a358a29 --- /dev/null +++ b/examples/llama.android/app/src/main/AndroidManifest.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + diff --git a/examples/llama.android/app/src/main/cpp/CMakeLists.txt b/examples/llama.android/app/src/main/cpp/CMakeLists.txt new file mode 100644 index 000000000..85139329a --- /dev/null +++ b/examples/llama.android/app/src/main/cpp/CMakeLists.txt @@ -0,0 +1,50 @@ + +# For more information about using CMake with Android Studio, read the +# documentation: https://d.android.com/studio/projects/add-native-code.html. +# For more examples on how to use CMake, see https://github.com/android/ndk-samples. + +# Sets the minimum CMake version required for this project. +cmake_minimum_required(VERSION 3.22.1) + +# Declares the project name. The project name can be accessed via ${ PROJECT_NAME}, +# Since this is the top level CMakeLists.txt, the project name is also accessible +# with ${CMAKE_PROJECT_NAME} (both CMake variables are in-sync within the top level +# build script scope). +project("llama-android") + +include(FetchContent) +FetchContent_Declare( + llama + GIT_REPOSITORY https://github.com/ggerganov/llama.cpp + GIT_TAG master +) + +# Also provides "common" +FetchContent_MakeAvailable(llama) + +# Creates and names a library, sets it as either STATIC +# or SHARED, and provides the relative paths to its source code. +# You can define multiple libraries, and CMake builds them for you. +# Gradle automatically packages shared libraries with your APK. +# +# In this top level CMakeLists.txt, ${CMAKE_PROJECT_NAME} is used to define +# the target library name; in the sub-module's CMakeLists.txt, ${PROJECT_NAME} +# is preferred for the same purpose. +# +# In order to load a library into your app from Java/Kotlin, you must call +# System.loadLibrary() and pass the name of the library defined here; +# for GameActivity/NativeActivity derived applications, the same library name must be +# used in the AndroidManifest.xml file. +add_library(${CMAKE_PROJECT_NAME} SHARED + # List C/C++ source files with relative paths to this CMakeLists.txt. + llama-android.cpp) + +# Specifies libraries CMake should link to your target library. You +# can link libraries from various origins, such as libraries defined in this +# build script, prebuilt third-party libraries, or Android system libraries. +target_link_libraries(${CMAKE_PROJECT_NAME} + # List libraries link to the target library + llama + common + android + log) diff --git a/examples/llama.android/app/src/main/cpp/llama-android.cpp b/examples/llama.android/app/src/main/cpp/llama-android.cpp new file mode 100644 index 000000000..2beb1e0d5 --- /dev/null +++ b/examples/llama.android/app/src/main/cpp/llama-android.cpp @@ -0,0 +1,394 @@ +#include +#include +#include +#include +#include +#include +#include "llama.h" +#include "common/common.h" + +// Write C++ code here. +// +// Do not forget to dynamically load the C++ library into your application. +// +// For instance, +// +// In MainActivity.java: +// static { +// System.loadLibrary("llama-android"); +// } +// +// Or, in MainActivity.kt: +// companion object { +// init { +// System.loadLibrary("llama-android") +// } +// } + +#define TAG "llama-android.cpp" +#define LOGi(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__) +#define LOGe(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__) + +jclass la_int_var; +jmethodID la_int_var_value; +jmethodID la_int_var_inc; + +static void log_callback(ggml_log_level level, const char * fmt, void * data) { + if (level == GGML_LOG_LEVEL_ERROR) __android_log_print(ANDROID_LOG_ERROR, TAG, fmt, data); + else if (level == GGML_LOG_LEVEL_INFO) __android_log_print(ANDROID_LOG_INFO, TAG, fmt, data); + else if (level == GGML_LOG_LEVEL_WARN) __android_log_print(ANDROID_LOG_WARN, TAG, fmt, data); + else __android_log_print(ANDROID_LOG_DEFAULT, TAG, fmt, data); +} + +extern "C" +JNIEXPORT jlong JNICALL +Java_com_example_llama_Llm_load_1model(JNIEnv *env, jobject, jstring filename) { + llama_model_params model_params = llama_model_default_params(); + + auto path_to_model = env->GetStringUTFChars(filename, 0); + LOGi("Loading model from %s", path_to_model); + + auto model = llama_load_model_from_file(path_to_model, model_params); + env->ReleaseStringUTFChars(filename, path_to_model); + + if (!model) { + LOGe("load_model() failed"); + env->ThrowNew(env->FindClass("java/lang/IllegalStateException"), "load_model() failed"); + return 0; + } + + return reinterpret_cast(model); +} + +extern "C" +JNIEXPORT void JNICALL +Java_com_example_llama_Llm_free_1model(JNIEnv *, jobject, jlong model) { + llama_free_model(reinterpret_cast(model)); +} + +extern "C" +JNIEXPORT jlong JNICALL +Java_com_example_llama_Llm_new_1context(JNIEnv *env, jobject, jlong jmodel) { + auto model = reinterpret_cast(jmodel); + + if (!model) { + LOGe("new_context(): model cannot be null"); + env->ThrowNew(env->FindClass("java/lang/IllegalArgumentException"), "Model cannot be null"); + return 0; + } + + int n_threads = std::max(1, std::min(8, (int) sysconf(_SC_NPROCESSORS_ONLN) - 2)); + LOGi("Using %d threads", n_threads); + + llama_context_params ctx_params = llama_context_default_params(); + ctx_params.seed = 1234; + ctx_params.n_ctx = 2048; + ctx_params.n_threads = n_threads; + ctx_params.n_threads_batch = n_threads; + + llama_context * context = llama_new_context_with_model(model, ctx_params); + + if (!context) { + LOGe("llama_new_context_with_model() returned null)"); + env->ThrowNew(env->FindClass("java/lang/IllegalStateException"), + "llama_new_context_with_model() returned null)"); + return 0; + } + + return reinterpret_cast(context); +} + +extern "C" +JNIEXPORT void JNICALL +Java_com_example_llama_Llm_free_1context(JNIEnv *, jobject, jlong context) { + llama_free(reinterpret_cast(context)); +} + +extern "C" +JNIEXPORT void JNICALL +Java_com_example_llama_Llm_backend_1free(JNIEnv *, jobject) { + llama_backend_free(); +} + +extern "C" +JNIEXPORT void JNICALL +Java_com_example_llama_Llm_log_1to_1android(JNIEnv *, jobject) { + llama_log_set(log_callback, NULL); +} + +extern "C" +JNIEXPORT jstring JNICALL +Java_com_example_llama_Llm_bench_1model( + JNIEnv *env, + jobject, + jlong context_pointer, + jlong model_pointer, + jlong batch_pointer, + jint pp, + jint tg, + jint pl, + jint nr + ) { + auto pp_avg = 0.0; + auto tg_avg = 0.0; + auto pp_std = 0.0; + auto tg_std = 0.0; + + const auto context = reinterpret_cast(context_pointer); + const auto model = reinterpret_cast(model_pointer); + const auto batch = reinterpret_cast(batch_pointer); + + const int n_ctx = llama_n_ctx(context); + + LOGi("n_ctx = %d", n_ctx); + + int i, j; + int nri; + for (nri = 0; nri < nr; nri++) { + LOGi("Benchmark prompt processing (pp)"); + + llama_batch_clear(*batch); + + const int n_tokens = pp; + for (i = 0; i < n_tokens; i++) { + llama_batch_add(*batch, 0, i, { 0 }, false); + } + + batch->logits[batch->n_tokens - 1] = true; + llama_kv_cache_clear(context); + + const auto t_pp_start = ggml_time_us(); + if (llama_decode(context, *batch) != 0) { + LOGi("llama_decode() failed during prompt processing"); + } + const auto t_pp_end = ggml_time_us(); + + // bench text generation + + LOGi("Benchmark text generation (tg)"); + + llama_kv_cache_clear(context); + const auto t_tg_start = ggml_time_us(); + for (i = 0; i < tg; i++) { + + llama_batch_clear(*batch); + for (j = 0; j < pl; j++) { + llama_batch_add(*batch, 0, i, { j }, true); + } + + LOGi("llama_decode() text generation: %d", i); + if (llama_decode(context, *batch) != 0) { + LOGi("llama_decode() failed during text generation"); + } + } + + const auto t_tg_end = ggml_time_us(); + + llama_kv_cache_clear(context); + + const auto t_pp = double(t_pp_end - t_pp_start) / 1000000.0; + const auto t_tg = double(t_tg_end - t_tg_start) / 1000000.0; + + const auto speed_pp = double(pp) / t_pp; + const auto speed_tg = double(pl * tg) / t_tg; + + pp_avg += speed_pp; + tg_avg += speed_tg; + + pp_std += speed_pp * speed_pp; + tg_std += speed_tg * speed_tg; + + LOGi("pp %f t/s, tg %f t/s", speed_pp, speed_tg); + } + + pp_avg /= double(nr); + tg_avg /= double(nr); + + if (nr > 1) { + pp_std = sqrt(pp_std / double(nr - 1) - pp_avg * pp_avg * double(nr) / double(nr - 1)); + tg_std = sqrt(tg_std / double(nr - 1) - tg_avg * tg_avg * double(nr) / double(nr - 1)); + } else { + pp_std = 0; + tg_std = 0; + } + + char model_desc[128]; + llama_model_desc(model, model_desc, sizeof(model_desc)); + + const auto model_size = double(llama_model_size(model)) / 1024.0 / 1024.0 / 1024.0; + const auto model_n_params = double(llama_model_n_params(model)) / 1e9; + + const auto backend = "(Android)"; // TODO: What should this be? + + std::stringstream result; + result << std::setprecision(2); + result << "| model | size | params | backend | test | t/s |\n"; + result << "| --- | --- | --- | --- | --- | --- |\n"; + result << "| " << model_desc << " | " << model_size << "GiB | " << model_n_params << "B | " << backend << " | pp " << pp << " | " << pp_avg << " ± " << pp_std << " |\n"; + result << "| " << model_desc << " | " << model_size << "GiB | " << model_n_params << "B | " << backend << " | tg " << tg << " | " << tg_avg << " ± " << tg_std << " |\n"; + + return env->NewStringUTF(result.str().c_str()); +} + +extern "C" +JNIEXPORT void JNICALL +Java_com_example_llama_Llm_free_1batch(JNIEnv *, jobject, jlong batch_pointer) { + llama_batch_free(*reinterpret_cast(batch_pointer)); +} + +extern "C" +JNIEXPORT jlong JNICALL +Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint embd, jint n_seq_max) { + + // Source: Copy of llama.cpp:llama_batch_init but heap-allocated. + + llama_batch *batch = new llama_batch { + 0, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + nullptr, + 0, + 0, + 0, + }; + + if (embd) { + batch->embd = (float *) malloc(sizeof(float) * n_tokens * embd); + } else { + batch->token = (llama_token *) malloc(sizeof(llama_token) * n_tokens); + } + + batch->pos = (llama_pos *) malloc(sizeof(llama_pos) * n_tokens); + batch->n_seq_id = (int32_t *) malloc(sizeof(int32_t) * n_tokens); + batch->seq_id = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * n_tokens); + for (int i = 0; i < n_tokens; ++i) { + batch->seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max); + } + batch->logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens); + + return reinterpret_cast(batch); +} + +extern "C" +JNIEXPORT void JNICALL +Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject) { + llama_backend_init(); +} + +extern "C" +JNIEXPORT jstring JNICALL +Java_com_example_llama_Llm_system_1info(JNIEnv *env, jobject) { + return env->NewStringUTF(llama_print_system_info()); +} + +extern "C" +JNIEXPORT jint JNICALL +Java_com_example_llama_Llm_completion_1init( + JNIEnv *env, + jobject, + jlong context_pointer, + jlong batch_pointer, + jstring jtext, + jint n_len + ) { + + const auto text = env->GetStringUTFChars(jtext, 0); + const auto context = reinterpret_cast(context_pointer); + const auto batch = reinterpret_cast(batch_pointer); + + const auto tokens_list = llama_tokenize(context, text, 1); + + auto n_ctx = llama_n_ctx(context); + auto n_kv_req = tokens_list.size() + (n_len - tokens_list.size()); + + LOGi("n_len = %d, n_ctx = %d, n_kv_req = %d", n_len, n_ctx, n_kv_req); + + if (n_kv_req > n_ctx) { + LOGe("error: n_kv_req > n_ctx, the required KV cache size is not big enough"); + } + + for (auto id : tokens_list) { + LOGi("%s", llama_token_to_piece(context, id).c_str()); + } + + llama_batch_clear(*batch); + + // evaluate the initial prompt + for (auto i = 0; i < tokens_list.size(); i++) { + llama_batch_add(*batch, tokens_list[i], i, { 0 }, false); + } + + // llama_decode will output logits only for the last token of the prompt + batch->logits[batch->n_tokens - 1] = true; + + if (llama_decode(context, *batch) != 0) { + LOGe("llama_decode() failed"); + } + + env->ReleaseStringUTFChars(jtext, text); + + return batch->n_tokens; +} + +extern "C" +JNIEXPORT jstring JNICALL +Java_com_example_llama_Llm_completion_1loop( + JNIEnv * env, + jobject, + jlong context_pointer, + jlong batch_pointer, + jint n_len, + jobject intvar_ncur +) { + const auto context = reinterpret_cast(context_pointer); + const auto batch = reinterpret_cast(batch_pointer); + const auto model = llama_get_model(context); + + if (!la_int_var) la_int_var = env->GetObjectClass(intvar_ncur); + if (!la_int_var_value) la_int_var_value = env->GetMethodID(la_int_var, "getValue", "()I"); + if (!la_int_var_inc) la_int_var_inc = env->GetMethodID(la_int_var, "inc", "()V"); + + auto n_vocab = llama_n_vocab(model); + auto logits = llama_get_logits_ith(context, batch->n_tokens - 1); + + std::vector candidates; + candidates.reserve(n_vocab); + + for (llama_token token_id = 0; token_id < n_vocab; token_id++) { + candidates.emplace_back(llama_token_data{ token_id, logits[token_id], 0.0f }); + } + + llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; + + // sample the most likely token + const auto new_token_id = llama_sample_token_greedy(context, &candidates_p); + + const auto n_cur = env->CallIntMethod(intvar_ncur, la_int_var_value); + if (new_token_id == llama_token_eos(model) || n_cur == n_len) { + return env->NewStringUTF(""); + } + + auto new_token_chars = llama_token_to_piece(context, new_token_id); + LOGi("new_token_chars: `%s`", new_token_chars.c_str()); + auto new_token = env->NewStringUTF(new_token_chars.c_str()); + + llama_batch_clear(*batch); + llama_batch_add(*batch, new_token_id, n_cur, { 0 }, true); + + env->CallVoidMethod(intvar_ncur, la_int_var_inc); + + if (llama_decode(context, *batch) != 0) { + LOGe("llama_decode() returned null"); + } + + return new_token; +} + +extern "C" +JNIEXPORT void JNICALL +Java_com_example_llama_Llm_kv_1cache_1clear(JNIEnv *, jobject, jlong context) { + llama_kv_cache_clear(reinterpret_cast(context)); +} diff --git a/examples/llama.android/app/src/main/java/com/example/llama/Downloadable.kt b/examples/llama.android/app/src/main/java/com/example/llama/Downloadable.kt new file mode 100644 index 000000000..78c231ae5 --- /dev/null +++ b/examples/llama.android/app/src/main/java/com/example/llama/Downloadable.kt @@ -0,0 +1,119 @@ +package com.example.llama + +import android.app.DownloadManager +import android.net.Uri +import android.util.Log +import androidx.compose.material3.Button +import androidx.compose.material3.Text +import androidx.compose.runtime.Composable +import androidx.compose.runtime.getValue +import androidx.compose.runtime.mutableDoubleStateOf +import androidx.compose.runtime.mutableStateOf +import androidx.compose.runtime.remember +import androidx.compose.runtime.rememberCoroutineScope +import androidx.compose.runtime.setValue +import androidx.core.database.getLongOrNull +import androidx.core.net.toUri +import kotlinx.coroutines.delay +import kotlinx.coroutines.launch +import java.io.File + +data class Downloadable(val name: String, val source: Uri, val destination: File) { + companion object { + @JvmStatic + private val tag: String? = this::class.qualifiedName + + sealed interface State + data object Ready: State + data class Downloading(val id: Long): State + data class Downloaded(val downloadable: Downloadable): State + data class Error(val message: String): State + + @JvmStatic + @Composable + fun Button(viewModel: MainViewModel, dm: DownloadManager, item: Downloadable) { + var status: State by remember { + mutableStateOf( + if (item.destination.exists()) Downloaded(item) + else Ready + ) + } + var progress by remember { mutableDoubleStateOf(0.0) } + + val coroutineScope = rememberCoroutineScope() + + suspend fun waitForDownload(result: Downloading, item: Downloadable): State { + while (true) { + val cursor = dm.query(DownloadManager.Query().setFilterById(result.id)) + + if (cursor == null) { + Log.e(tag, "dm.query() returned null") + return Error("dm.query() returned null") + } + + if (!cursor.moveToFirst() || cursor.count < 1) { + cursor.close() + Log.i(tag, "cursor.moveToFirst() returned false or cursor.count < 1, download canceled?") + return Ready + } + + val pix = cursor.getColumnIndex(DownloadManager.COLUMN_BYTES_DOWNLOADED_SO_FAR) + val tix = cursor.getColumnIndex(DownloadManager.COLUMN_TOTAL_SIZE_BYTES) + val sofar = cursor.getLongOrNull(pix) ?: 0 + val total = cursor.getLongOrNull(tix) ?: 1 + cursor.close() + + if (sofar == total) { + return Downloaded(item) + } + + progress = (sofar * 1.0) / total + + delay(1000L) + } + } + + fun onClick() { + when (val s = status) { + is Downloaded -> { + viewModel.load(item.destination.path) + } + + is Downloading -> { + coroutineScope.launch { + status = waitForDownload(s, item) + } + } + + else -> { + item.destination.delete() + + val request = DownloadManager.Request(item.source).apply { + setTitle("Downloading model") + setDescription("Downloading model: ${item.name}") + setAllowedNetworkTypes(DownloadManager.Request.NETWORK_WIFI) + setDestinationUri(item.destination.toUri()) + } + + viewModel.log("Saving ${item.name} to ${item.destination.path}") + Log.i(tag, "Saving ${item.name} to ${item.destination.path}") + + val id = dm.enqueue(request) + status = Downloading(id) + onClick() + } + } + } + + Button(onClick = { onClick() }, enabled = status !is Downloading) { + when (status) { + is Downloading -> Text(text = "Downloading ${(progress * 100).toInt()}%") + is Downloaded -> Text("Load ${item.name}") + is Ready -> Text("Download ${item.name}") + is Error -> Text("Download ${item.name}") + } + } + } + + } +} diff --git a/examples/llama.android/app/src/main/java/com/example/llama/Llm.kt b/examples/llama.android/app/src/main/java/com/example/llama/Llm.kt new file mode 100644 index 000000000..5f3270372 --- /dev/null +++ b/examples/llama.android/app/src/main/java/com/example/llama/Llm.kt @@ -0,0 +1,172 @@ +package com.example.llama + +import android.util.Log +import kotlinx.coroutines.CoroutineDispatcher +import kotlinx.coroutines.asCoroutineDispatcher +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.flow +import kotlinx.coroutines.flow.flowOn +import kotlinx.coroutines.withContext +import java.util.concurrent.Executors +import kotlin.concurrent.thread + +class Llm { + private val tag: String? = this::class.simpleName + + private val threadLocalState: ThreadLocal = ThreadLocal.withInitial { State.Idle } + + private val runLoop: CoroutineDispatcher = Executors.newSingleThreadExecutor { + thread(start = false, name = "Llm-RunLoop") { + Log.d(tag, "Dedicated thread for native code: ${Thread.currentThread().name}") + + // No-op if called more than once. + System.loadLibrary("llama-android") + + // Set llama log handler to Android + log_to_android() + backend_init(false) + + Log.d(tag, system_info()) + + it.run() + }.apply { + uncaughtExceptionHandler = Thread.UncaughtExceptionHandler { _, exception: Throwable -> + Log.e(tag, "Unhandled exception", exception) + } + } + }.asCoroutineDispatcher() + + private val nlen: Int = 64 + + private external fun log_to_android() + private external fun load_model(filename: String): Long + private external fun free_model(model: Long) + private external fun new_context(model: Long): Long + private external fun free_context(context: Long) + private external fun backend_init(numa: Boolean) + private external fun backend_free() + private external fun free_batch(batch: Long) + private external fun new_batch(nTokens: Int, embd: Int, nSeqMax: Int): Long + private external fun bench_model( + context: Long, + model: Long, + batch: Long, + pp: Int, + tg: Int, + pl: Int, + nr: Int + ): String + + private external fun system_info(): String + + private external fun completion_init( + context: Long, + batch: Long, + text: String, + nLen: Int + ): Int + + private external fun completion_loop( + context: Long, + batch: Long, + nLen: Int, + ncur: IntVar + ): String + + private external fun kv_cache_clear(context: Long) + + suspend fun bench(pp: Int, tg: Int, pl: Int, nr: Int = 1): String { + return withContext(runLoop) { + when (val state = threadLocalState.get()) { + is State.Loaded -> { + Log.d(tag, "bench(): $state") + bench_model(state.context, state.model, state.batch, pp, tg, pl, nr) + } + + else -> throw IllegalStateException("No model loaded") + } + } + } + + suspend fun load(pathToModel: String) { + withContext(runLoop) { + when (threadLocalState.get()) { + is State.Idle -> { + val model = load_model(pathToModel) + if (model == 0L) throw IllegalStateException("load_model() failed") + + val context = new_context(model) + if (context == 0L) throw IllegalStateException("new_context() failed") + + val batch = new_batch(512, 0, 1) + if (batch == 0L) throw IllegalStateException("new_batch() failed") + + Log.i(tag, "Loaded model $pathToModel") + threadLocalState.set(State.Loaded(model, context, batch)) + } + else -> throw IllegalStateException("Model already loaded") + } + } + } + + fun send(message: String): Flow = flow { + when (val state = threadLocalState.get()) { + is State.Loaded -> { + val ncur = IntVar(completion_init(state.context, state.batch, message, nlen)) + while (ncur.value <= nlen) { + val str = completion_loop(state.context, state.batch, nlen, ncur) + if (str.isEmpty()) { + break + } + emit(str) + } + kv_cache_clear(state.context) + } + else -> {} + } + }.flowOn(runLoop) + + /** + * Unloads the model and frees resources. + * + * This is a no-op if there's no model loaded. + */ + suspend fun unload() { + withContext(runLoop) { + when (val state = threadLocalState.get()) { + is State.Loaded -> { + free_context(state.context) + free_model(state.model) + free_batch(state.batch) + + threadLocalState.set(State.Idle) + } + else -> {} + } + } + } + + companion object { + private class IntVar(value: Int) { + @Volatile + var value: Int = value + private set + + fun inc() { + synchronized(this) { + value += 1 + } + } + } + + private sealed interface State { + data object Idle: State + data class Loaded(val model: Long, val context: Long, val batch: Long): State + } + + // Enforce only one instance of Llm. + private val _instance: Llm = Llm() + + fun instance(): Llm = _instance + } +} diff --git a/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt b/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt new file mode 100644 index 000000000..9da04f7d3 --- /dev/null +++ b/examples/llama.android/app/src/main/java/com/example/llama/MainActivity.kt @@ -0,0 +1,154 @@ +package com.example.llama + +import android.app.ActivityManager +import android.app.DownloadManager +import android.content.ClipData +import android.content.ClipboardManager +import android.net.Uri +import android.os.Bundle +import android.os.StrictMode +import android.os.StrictMode.VmPolicy +import android.text.format.Formatter +import androidx.activity.ComponentActivity +import androidx.activity.compose.setContent +import androidx.activity.viewModels +import androidx.compose.foundation.layout.Box +import androidx.compose.foundation.layout.Column +import androidx.compose.foundation.layout.Row +import androidx.compose.foundation.layout.fillMaxSize +import androidx.compose.foundation.layout.padding +import androidx.compose.foundation.lazy.LazyColumn +import androidx.compose.foundation.lazy.items +import androidx.compose.foundation.lazy.rememberLazyListState +import androidx.compose.material3.Button +import androidx.compose.material3.LocalContentColor +import androidx.compose.material3.MaterialTheme +import androidx.compose.material3.OutlinedTextField +import androidx.compose.material3.Surface +import androidx.compose.material3.Text +import androidx.compose.runtime.Composable +import androidx.compose.ui.Modifier +import androidx.compose.ui.unit.dp +import androidx.core.content.getSystemService +import com.example.llama.ui.theme.LlamaAndroidTheme +import java.io.File + +class MainActivity( + activityManager: ActivityManager? = null, + downloadManager: DownloadManager? = null, + clipboardManager: ClipboardManager? = null, +): ComponentActivity() { + private val tag: String? = this::class.simpleName + + private val activityManager by lazy { activityManager ?: getSystemService()!! } + private val downloadManager by lazy { downloadManager ?: getSystemService()!! } + private val clipboardManager by lazy { clipboardManager ?: getSystemService()!! } + + private val viewModel: MainViewModel by viewModels() + + // Get a MemoryInfo object for the device's current memory status. + private fun availableMemory(): ActivityManager.MemoryInfo { + return ActivityManager.MemoryInfo().also { memoryInfo -> + activityManager.getMemoryInfo(memoryInfo) + } + } + + override fun onCreate(savedInstanceState: Bundle?) { + super.onCreate(savedInstanceState) + + StrictMode.setVmPolicy( + VmPolicy.Builder(StrictMode.getVmPolicy()) + .detectLeakedClosableObjects() + .build() + ) + + val free = Formatter.formatFileSize(this, availableMemory().availMem) + val total = Formatter.formatFileSize(this, availableMemory().totalMem) + + viewModel.log("Current memory: $free / $total") + viewModel.log("Downloads directory: ${getExternalFilesDir(null)}") + + val extFilesDir = getExternalFilesDir(null) + + val models = listOf( + Downloadable( + "Phi-2 7B (Q4_0, 1.6 GiB)", + Uri.parse("https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf?download=true"), + File(extFilesDir, "phi-2-q4_0.gguf"), + ), + Downloadable( + "TinyLlama 1.1B (f16, 2.2 GiB)", + Uri.parse("https://huggingface.co/ggml-org/models/resolve/main/tinyllama-1.1b/ggml-model-f16.gguf?download=true"), + File(extFilesDir, "tinyllama-1.1-f16.gguf"), + ), + Downloadable( + "Phi 2 DPO (Q3_K_M, 1.48 GiB)", + Uri.parse("https://huggingface.co/TheBloke/phi-2-dpo-GGUF/resolve/main/phi-2-dpo.Q3_K_M.gguf?download=true"), + File(extFilesDir, "phi-2-dpo.Q3_K_M.gguf") + ), + ) + + setContent { + LlamaAndroidTheme { + // A surface container using the 'background' color from the theme + Surface( + modifier = Modifier.fillMaxSize(), + color = MaterialTheme.colorScheme.background + ) { + MainCompose( + viewModel, + clipboardManager, + downloadManager, + models, + ) + } + + } + } + } +} + +@Composable +fun MainCompose( + viewModel: MainViewModel, + clipboard: ClipboardManager, + dm: DownloadManager, + models: List +) { + Column { + val scrollState = rememberLazyListState() + + Box(modifier = Modifier.weight(1f)) { + LazyColumn(state = scrollState) { + items(viewModel.messages) { + Text( + it, + style = MaterialTheme.typography.bodyLarge.copy(color = LocalContentColor.current), + modifier = Modifier.padding(16.dp) + ) + } + } + } + OutlinedTextField( + value = viewModel.message, + onValueChange = { viewModel.updateMessage(it) }, + label = { Text("Message") }, + ) + Row { + Button({ viewModel.send() }) { Text("Send") } + Button({ viewModel.bench(8, 4, 1) }) { Text("Bench") } + Button({ viewModel.clear() }) { Text("Clear") } + Button({ + viewModel.messages.joinToString("\n").let { + clipboard.setPrimaryClip(ClipData.newPlainText("", it)) + } + }) { Text("Copy") } + } + + Column { + for (model in models) { + Downloadable.Button(viewModel, dm, model) + } + } + } +} diff --git a/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt b/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt new file mode 100644 index 000000000..be95e2221 --- /dev/null +++ b/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt @@ -0,0 +1,104 @@ +package com.example.llama + +import android.util.Log +import androidx.compose.runtime.getValue +import androidx.compose.runtime.mutableStateOf +import androidx.compose.runtime.setValue +import androidx.lifecycle.ViewModel +import androidx.lifecycle.viewModelScope +import kotlinx.coroutines.flow.catch +import kotlinx.coroutines.launch + +class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() { + companion object { + @JvmStatic + private val NanosPerSecond = 1_000_000_000.0 + } + + private val tag: String? = this::class.simpleName + + var messages by mutableStateOf(listOf("Initializing...")) + private set + + var message by mutableStateOf("") + private set + + override fun onCleared() { + super.onCleared() + + viewModelScope.launch { + try { + llm.unload() + } catch (exc: IllegalStateException) { + messages += exc.message!! + } + } + } + + fun send() { + val text = message + message = "" + + // Add to messages console. + messages += text + messages += "" + + viewModelScope.launch { + llm.send(text) + .catch { + Log.e(tag, "send() failed", it) + messages += it.message!! + } + .collect { messages = messages.dropLast(1) + (messages.last() + it) } + } + } + + fun bench(pp: Int, tg: Int, pl: Int, nr: Int = 1) { + viewModelScope.launch { + try { + val start = System.nanoTime() + val warmupResult = llm.bench(pp, tg, pl, nr) + val end = System.nanoTime() + + messages += warmupResult + + val warmup = (end - start).toDouble() / NanosPerSecond + messages += "Warm up time: $warmup seconds, please wait..." + + if (warmup > 5.0) { + messages += "Warm up took too long, aborting benchmark" + return@launch + } + + messages += llm.bench(512, 128, 1, 3) + } catch (exc: IllegalStateException) { + Log.e(tag, "bench() failed", exc) + messages += exc.message!! + } + } + } + + fun load(pathToModel: String) { + viewModelScope.launch { + try { + llm.load(pathToModel) + messages += "Loaded $pathToModel" + } catch (exc: IllegalStateException) { + Log.e(tag, "load() failed", exc) + messages += exc.message!! + } + } + } + + fun updateMessage(newMessage: String) { + message = newMessage + } + + fun clear() { + messages = listOf() + } + + fun log(message: String) { + messages += message + } +} diff --git a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Color.kt b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Color.kt new file mode 100644 index 000000000..40c30e8d9 --- /dev/null +++ b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Color.kt @@ -0,0 +1,11 @@ +package com.example.llama.ui.theme + +import androidx.compose.ui.graphics.Color + +val Purple80 = Color(0xFFD0BCFF) +val PurpleGrey80 = Color(0xFFCCC2DC) +val Pink80 = Color(0xFFEFB8C8) + +val Purple40 = Color(0xFF6650a4) +val PurpleGrey40 = Color(0xFF625b71) +val Pink40 = Color(0xFF7D5260) diff --git a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Theme.kt b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Theme.kt new file mode 100644 index 000000000..e742220a8 --- /dev/null +++ b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Theme.kt @@ -0,0 +1,70 @@ +package com.example.llama.ui.theme + +import android.app.Activity +import android.os.Build +import androidx.compose.foundation.isSystemInDarkTheme +import androidx.compose.material3.MaterialTheme +import androidx.compose.material3.darkColorScheme +import androidx.compose.material3.dynamicDarkColorScheme +import androidx.compose.material3.dynamicLightColorScheme +import androidx.compose.material3.lightColorScheme +import androidx.compose.runtime.Composable +import androidx.compose.runtime.SideEffect +import androidx.compose.ui.graphics.toArgb +import androidx.compose.ui.platform.LocalContext +import androidx.compose.ui.platform.LocalView +import androidx.core.view.WindowCompat + +private val DarkColorScheme = darkColorScheme( + primary = Purple80, + secondary = PurpleGrey80, + tertiary = Pink80 +) + +private val LightColorScheme = lightColorScheme( + primary = Purple40, + secondary = PurpleGrey40, + tertiary = Pink40 + + /* Other default colors to override + background = Color(0xFFFFFBFE), + surface = Color(0xFFFFFBFE), + onPrimary = Color.White, + onSecondary = Color.White, + onTertiary = Color.White, + onBackground = Color(0xFF1C1B1F), + onSurface = Color(0xFF1C1B1F), + */ +) + +@Composable +fun LlamaAndroidTheme( + darkTheme: Boolean = isSystemInDarkTheme(), + // Dynamic color is available on Android 12+ + dynamicColor: Boolean = true, + content: @Composable () -> Unit +) { + val colorScheme = when { + dynamicColor && Build.VERSION.SDK_INT >= Build.VERSION_CODES.S -> { + val context = LocalContext.current + if (darkTheme) dynamicDarkColorScheme(context) else dynamicLightColorScheme(context) + } + + darkTheme -> DarkColorScheme + else -> LightColorScheme + } + val view = LocalView.current + if (!view.isInEditMode) { + SideEffect { + val window = (view.context as Activity).window + window.statusBarColor = colorScheme.primary.toArgb() + WindowCompat.getInsetsController(window, view).isAppearanceLightStatusBars = darkTheme + } + } + + MaterialTheme( + colorScheme = colorScheme, + typography = Typography, + content = content + ) +} diff --git a/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Type.kt b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Type.kt new file mode 100644 index 000000000..0b87946ca --- /dev/null +++ b/examples/llama.android/app/src/main/java/com/example/llama/ui/theme/Type.kt @@ -0,0 +1,34 @@ +package com.example.llama.ui.theme + +import androidx.compose.material3.Typography +import androidx.compose.ui.text.TextStyle +import androidx.compose.ui.text.font.FontFamily +import androidx.compose.ui.text.font.FontWeight +import androidx.compose.ui.unit.sp + +// Set of Material typography styles to start with +val Typography = Typography( + bodyLarge = TextStyle( + fontFamily = FontFamily.Default, + fontWeight = FontWeight.Normal, + fontSize = 16.sp, + lineHeight = 24.sp, + letterSpacing = 0.5.sp + ) + /* Other default text styles to override + titleLarge = TextStyle( + fontFamily = FontFamily.Default, + fontWeight = FontWeight.Normal, + fontSize = 22.sp, + lineHeight = 28.sp, + letterSpacing = 0.sp + ), + labelSmall = TextStyle( + fontFamily = FontFamily.Default, + fontWeight = FontWeight.Medium, + fontSize = 11.sp, + lineHeight = 16.sp, + letterSpacing = 0.5.sp + ) + */ +) diff --git a/examples/llama.android/app/src/main/res/drawable/ic_launcher_background.xml b/examples/llama.android/app/src/main/res/drawable/ic_launcher_background.xml new file mode 100644 index 000000000..07d5da9cb --- /dev/null +++ b/examples/llama.android/app/src/main/res/drawable/ic_launcher_background.xml @@ -0,0 +1,170 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/llama.android/app/src/main/res/drawable/ic_launcher_foreground.xml b/examples/llama.android/app/src/main/res/drawable/ic_launcher_foreground.xml new file mode 100644 index 000000000..7706ab9e6 --- /dev/null +++ b/examples/llama.android/app/src/main/res/drawable/ic_launcher_foreground.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + diff --git a/examples/llama.android/app/src/main/res/mipmap-anydpi/ic_launcher.xml b/examples/llama.android/app/src/main/res/mipmap-anydpi/ic_launcher.xml new file mode 100644 index 000000000..b3e26b4c6 --- /dev/null +++ b/examples/llama.android/app/src/main/res/mipmap-anydpi/ic_launcher.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/examples/llama.android/app/src/main/res/mipmap-anydpi/ic_launcher_round.xml b/examples/llama.android/app/src/main/res/mipmap-anydpi/ic_launcher_round.xml new file mode 100644 index 000000000..b3e26b4c6 --- /dev/null +++ b/examples/llama.android/app/src/main/res/mipmap-anydpi/ic_launcher_round.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/examples/llama.android/app/src/main/res/mipmap-hdpi/ic_launcher.webp b/examples/llama.android/app/src/main/res/mipmap-hdpi/ic_launcher.webp new file mode 100644 index 000000000..c209e78ec Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-hdpi/ic_launcher.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp b/examples/llama.android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp new file mode 100644 index 000000000..b2dfe3d1b Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-hdpi/ic_launcher_round.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-mdpi/ic_launcher.webp b/examples/llama.android/app/src/main/res/mipmap-mdpi/ic_launcher.webp new file mode 100644 index 000000000..4f0f1d64e Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-mdpi/ic_launcher.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp b/examples/llama.android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp new file mode 100644 index 000000000..62b611da0 Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-mdpi/ic_launcher_round.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp b/examples/llama.android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp new file mode 100644 index 000000000..948a3070f Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-xhdpi/ic_launcher.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp b/examples/llama.android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp new file mode 100644 index 000000000..1b9a6956b Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp b/examples/llama.android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp new file mode 100644 index 000000000..28d4b77f9 Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-xxhdpi/ic_launcher.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp b/examples/llama.android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp new file mode 100644 index 000000000..9287f5083 Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp b/examples/llama.android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp new file mode 100644 index 000000000..aa7d6427e Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.webp differ diff --git a/examples/llama.android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp b/examples/llama.android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp new file mode 100644 index 000000000..9126ae37c Binary files /dev/null and b/examples/llama.android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.webp differ diff --git a/examples/llama.android/app/src/main/res/values/colors.xml b/examples/llama.android/app/src/main/res/values/colors.xml new file mode 100644 index 000000000..ca1931bca --- /dev/null +++ b/examples/llama.android/app/src/main/res/values/colors.xml @@ -0,0 +1,10 @@ + + + #FFBB86FC + #FF6200EE + #FF3700B3 + #FF03DAC5 + #FF018786 + #FF000000 + #FFFFFFFF + diff --git a/examples/llama.android/app/src/main/res/values/strings.xml b/examples/llama.android/app/src/main/res/values/strings.xml new file mode 100644 index 000000000..7a9d314e2 --- /dev/null +++ b/examples/llama.android/app/src/main/res/values/strings.xml @@ -0,0 +1,3 @@ + + LlamaAndroid + diff --git a/examples/llama.android/app/src/main/res/values/themes.xml b/examples/llama.android/app/src/main/res/values/themes.xml new file mode 100644 index 000000000..8a24fda56 --- /dev/null +++ b/examples/llama.android/app/src/main/res/values/themes.xml @@ -0,0 +1,5 @@ + + + +