diff --git a/Makefile b/Makefile
index 5ec996c87..74c60bce1 100644
--- a/Makefile
+++ b/Makefile
@@ -559,19 +559,19 @@ ifdef GGML_OPENBLAS64
 	MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas64)
 	MK_CFLAGS   += $(shell pkg-config --cflags-only-other openblas64)
 	MK_LDFLAGS  += $(shell pkg-config --libs openblas64)
-	OBJ_GGML    += src/ggml-blas/ggml-blas.o
+	OBJ_GGML    += ggml/src/ggml-blas/ggml-blas.o
 endif # GGML_OPENBLAS64
 
 ifdef GGML_BLIS
 	MK_CPPFLAGS += -DGGML_USE_BLAS -DGGML_BLAS_USE_BLIS -I/usr/local/include/blis -I/usr/include/blis
 	MK_LDFLAGS  += -lblis -L/usr/local/lib
-	OBJ_GGML    += src/ggml-blas/ggml-blas.o
+	OBJ_GGML    += ggml/src/ggml-blas/ggml-blas.o
 endif # GGML_BLIS
 
 ifdef GGML_NVPL
 	MK_CPPFLAGS += -DGGML_USE_BLAS -DGGML_BLAS_USE_NVPL -DNVPL_ILP64 -I/usr/local/include/nvpl_blas -I/usr/include/nvpl_blas
 	MK_LDFLAGS  += -L/usr/local/lib -lnvpl_blas_core -lnvpl_blas_ilp64_gomp
-	OBJ_GGML    += src/ggml-blas/ggml-blas.o
+	OBJ_GGML    += ggml/src/ggml-blas/ggml-blas.o
 endif # GGML_NVPL
 
 ifndef GGML_NO_LLAMAFILE
@@ -1267,13 +1267,22 @@ clean:
 	rm -rvf ggml/*.dll
 	rm -rvf ggml/*.so
 	rm -rvf ggml/src/*.o
+	rm -rvf common/build-info.cpp
 	rm -rvf ggml/src/ggml-cpu/*.o
 	rm -rvf ggml/src/ggml-cpu/llamafile/*.o
-	rm -rvf common/build-info.cpp
-	rm -vrf ggml/src/ggml-metal/ggml-metal-embed.metal
+	rm -vrf ggml/src/ggml-amx/*.o
+	rm -vrf ggml/src/ggml-blas/*.o
+	rm -vrf ggml/src/ggml-cann/*.o
+	rm -vrf ggml/src/ggml-cpu/*.o
 	rm -vrf ggml/src/ggml-cuda/*.o
 	rm -vrf ggml/src/ggml-cuda/template-instances/*.o
-	rm -vrf ggml/src/ggml-amx/*.o
+	rm -vrf ggml/src/ggml-hip/*.o
+	rm -vrf ggml/src/ggml-kompute/*.o
+	rm -vrf ggml/src/ggml-metal/*.o
+	rm -vrf ggml/src/ggml-metal/ggml-metal-embed.metal
+	rm -vrf ggml/src/ggml-rpc/*.o
+	rm -vrf ggml/src/ggml-sycl/*.o
+	rm -vrf ggml/src/ggml-vulkan/*.o
 	rm -rvf $(BUILD_TARGETS)
 	rm -rvf $(TEST_TARGETS)
 	rm -f vulkan-shaders-gen ggml/src/ggml-vulkan-shaders.hpp ggml/src/ggml-vulkan-shaders.cpp
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index d227e3036..8f4e0e206 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -872,7 +872,7 @@ struct test {
     static const std::vector<std::string> & get_fields() {
         static const std::vector<std::string> fields = {
             "build_commit", "build_number",
-            "cpu_info", "gpu_info",
+            "cpu_info", "gpu_info", "backends",
             "model_filename", "model_type", "model_size", "model_n_params",
             "n_batch", "n_ubatch",
             "n_threads", "cpu_mask", "cpu_strict", "poll",
@@ -927,7 +927,7 @@ struct test {
         }
         std::vector<std::string> values = {
             build_commit, std::to_string(build_number),
-            cpu_info, gpu_info,
+            cpu_info, gpu_info, get_backend(),
             model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
             std::to_string(n_batch), std::to_string(n_ubatch),
             std::to_string(n_threads), cpu_mask, std::to_string(cpu_strict), std::to_string(poll),
@@ -1158,7 +1158,8 @@ struct markdown_printer : public printer {
         fields.emplace_back("size");
         fields.emplace_back("params");
         fields.emplace_back("backend");
-        bool is_cpu_backend = test::get_backend() == "CPU" || test::get_backend() == "BLAS";
+        bool is_cpu_backend = test::get_backend().find("CPU") != std::string::npos ||
+                              test::get_backend().find("BLAS") != std::string::npos;
         if (!is_cpu_backend) {
             fields.emplace_back("n_gpu_layers");
         }
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index c62bda086..ec78169f2 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -244,6 +244,7 @@ endif()
 
 # FIXME: this should be done in the backend cmake files
 if (GGML_METAL)
+    # FIXME: does this need to be installed with GGML_METAL_EMBED_LIBRARY?
     install(
         FILES src/ggml-metal/ggml-metal.metal
         PERMISSIONS