Merge branch 'master' of https://github.com/ggerganov/llama.cpp into jon/tall-and-skinny-matmul

commit e112522aa9
jon-chuang committed 2023-04-30 20:57:32 +08:00
4 changed files with 14 additions and 24 deletions

.gitignore

@@ -28,7 +28,7 @@ models/*
 /result
 /perplexity
 /embedding
-/benchmark-q4_0-matmult
+/benchmark-matmult
 /vdot
 /Pipfile

Makefile

@@ -180,7 +180,7 @@ common.o: examples/common.cpp examples/common.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
 clean:
-	rm -vf *.o main quantize quantize-stats perplexity embedding benchmark-q4_0-matmult
+	rm -vf *.o main quantize quantize-stats perplexity embedding benchmark-matmult
 
 main: examples/main/main.cpp ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
@@ -210,9 +210,9 @@ libllama.so: llama.o ggml.o $(OBJS)
 # Tests
 #
 
-benchmark: examples/benchmark/benchmark-q4_0-matmult.c ggml.o llama.o common.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $^ -o benchmark-q4_0-matmult $(LDFLAGS)
-	./benchmark-q4_0-matmult
+benchmark-matmult: examples/benchmark/benchmark-matmult.cpp ggml.o llama.o common.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
+	./$@
 
 .PHONY: tests
 tests:

examples/benchmark/CMakeLists.txt

@@ -1,4 +1,4 @@
 set(TARGET benchmark)
-add_executable(${TARGET} benchmark-q4_0-matmult.c)
+add_executable(${TARGET} benchmark-matmult.cpp)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)

examples/benchmark/benchmark-q4_0-matmult.c → examples/benchmark/benchmark-matmult.cpp

@@ -1,11 +1,3 @@
-/*
-    License: MIT License
-
-    Changelog:
-    - 2023-03-31 Initial version by Sebastian Apel (https://github.com/SebastianApel)
-
-*/
-
 #include <locale.h>
 #include "ggml.h"
 #include "llama.h"
@@ -99,12 +91,9 @@ int main(int argc, char ** argv) {
         }
     }
 
-
     // create the ggml context
     printf("Starting Test\n");
 
-
-
     struct ggml_context * ctx;
     //const int sizex = 4096;
     //const int sizey = 11008;
@@ -126,16 +115,18 @@ int main(int argc, char ** argv) {
 #endif
 
     //printf("Memsize required = %i\n", sizex*sizex);
-    ggml_type wtype = GGML_TYPE_F32;
 
     size_t ctx_size = 0;
-    ctx_size += sizex*sizey*ggml_type_sizef(wtype);
-    ctx_size += sizex*sizey*ggml_type_sizef(wtype);
     ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
-    ctx_size += sizex*sizeof(float);
-    ctx_size += 1024*1024*100;
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32);
+    ctx_size += sizex*sizez*ggml_type_sizef(GGML_TYPE_F32);
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_Q4_0);
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
+    ctx_size += sizex*sizey*ggml_type_sizef(GGML_TYPE_F32); // BLAS
+    ctx_size += 1024*1024*16;
 
-    printf("Allocating Memory of size %li byes, %li MB\n",ctx_size, (ctx_size/1024/1024));
+    printf("Allocating Memory of size %li bytes, %li MB\n",ctx_size, (ctx_size/1024/1024));
 
     struct ggml_init_params params = {
         /*.mem_size =*/ ctx_size,
@@ -263,7 +254,6 @@ int main(int argc, char ** argv) {
         ggml_graph_compute(ctx, &gf31);
         long long int stop = ggml_time_us();
         long long int usec = stop-start;
-        float sec = usec/1000000;
         float flops_per_usec = (1.0f*flops_per_matrix)/usec;
         printf("%9i;%8i;%6i;%6i;%6i;%15lli;%18lli;%19.2f\n",
             i,