diff --git a/Makefile b/Makefile
index bdaf70ae6..97eb19aba 100644
--- a/Makefile
+++ b/Makefile
@@ -139,7 +139,7 @@ $(info I CC:       $(CCV))
 $(info I CXX:      $(CXXV))
 $(info )
 
-default: llamalib quantize llamalib_blas
+default: llamalib llamalib_blas
 
 #
 # Build library
@@ -187,6 +187,9 @@ llamalib_blas: ggml_blas.o ggml_v1.o expose.o common.o llama_adapter.o gpttype_a
 quantize: examples/quantize/quantize.cpp ggml.o llama.o
 	$(CXX) $(CXXFLAGS) examples/quantize/quantize.cpp ggml.o llama.o -o quantize $(LDFLAGS)
 
+quantize_gptj: ggml.o llama.o
+	$(CXX) $(CXXFLAGS) otherarch/gptj_quantize.cpp ggml.o llama.o -o quantize_gptj $(LDFLAGS)
+
 perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o common.o
 	$(CXX) $(CXXFLAGS) examples/perplexity/perplexity.cpp ggml.o llama.o common.o -o perplexity $(LDFLAGS)
 
diff --git a/otherarch/gptj_quantize.cpp b/otherarch/gptj_quantize.cpp
index cbfbe5420..7782f2846 100644
--- a/otherarch/gptj_quantize.cpp
+++ b/otherarch/gptj_quantize.cpp
@@ -1,6 +1,6 @@
 #include "ggml.h"
 
-#include "utils.h"
+#include "otherarch/utils.h"
 
 #include <cassert>
 #include <cmath>
@@ -283,6 +283,7 @@ bool gptj_model_quantize(const std::string & fname_inp, const std::string & fnam
 // ./gpt-2-quantize models/gpt-2-117M/ggml-model.bin models/gpt-2-117M/ggml-model-quant.bin type
 //
 int main(int argc, char ** argv) {
+    ggml_time_init();
     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
         fprintf(stderr, "  type = 2 - q4_0\n");
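
Note on the added ggml_time_init() call (explanatory sketch, not part of the diff): the quantizer's main() samples ggml's wall-clock helpers for its timing report, and those helpers depend on state that ggml_time_init() sets up; ggml_init() normally performs this initialization, but here the first time sample is taken before any ggml_init() call, so the tool initializes the timer itself. A minimal standalone sketch of the same pattern, assuming only ggml.h from this repo is on the include path:

    // Sketch only (not from the diff); ggml_time_init()/ggml_time_us() are the real ggml timing API.
    #include "ggml.h"
    #include <cstdio>

    int main() {
        ggml_time_init();                           // same call added at the top of gptj_quantize.cpp's main()
        const int64_t t_start_us = ggml_time_us();  // only meaningful after ggml_time_init()

        // ... quantization work would run here ...

        const int64_t t_end_us = ggml_time_us();
        printf("total time = %8.2f ms\n", (t_end_us - t_start_us) / 1000.0);
        return 0;
    }

With the Makefile change, the tool is built with "make quantize_gptj" and invoked per its usage string, e.g. ./quantize_gptj model-f32.bin model-quant.bin 2 (type 2 = q4_0).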