jon-chuang 2023-04-15 21:54:37 +08:00
parent 69511b2c4a
commit 00e86b97cc
5 changed files with 48 additions and 8 deletions

Makefile

@@ -176,7 +176,7 @@ libllama.so: llama.o ggml.o
# Tests
#
benchmark: examples/benchmark/benchmark-q4_0-matmult.c ggml.o
benchmark: examples/benchmark/benchmark-q4_0-matmult.c ggml.o llama.o common.o
$(CXX) $(CXXFLAGS) $^ -o benchmark-q4_0-matmult $(LDFLAGS)
./benchmark-q4_0-matmult

examples/CMakeLists.txt

@@ -34,4 +34,5 @@ else()
add_subdirectory(quantize-stats)
add_subdirectory(perplexity)
add_subdirectory(embedding)
add_subdirectory(benchmark)
endif()

examples/benchmark/CMakeLists.txt (new file)

@@ -0,0 +1,4 @@
set(TARGET benchmark)
add_executable(${TARGET} benchmark-q4_0-matmul.cpp)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)

examples/benchmark/benchmark-q4_0-matmult.c

@@ -8,6 +8,7 @@
#include <locale.h>
#include "ggml.h"
#include "llama.h"
#include <assert.h>
#include <math.h>
#include <cstring>
@@ -45,7 +46,7 @@ float tensor_sum_elements(struct ggml_tensor * tensor) {
#define TENSOR_TYPE_AS_STR(TYPE) TYPE == GGML_TYPE_F32 ? "FP32" : TYPE == GGML_TYPE_F16 ? "FP16" : TYPE == GGML_TYPE_Q4_0 ? "Q4_0" : TYPE == GGML_TYPE_Q4_1 ? "Q4_1" : "UNKNOWN"
#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5d x %5d x %5d, nb = (%5li, %5li, %5li) - ", #TENSOR, \
#define TENSOR_DUMP(TENSOR) printf("%15s: type = %i (%5s) ne = %5ld x %5ld x %5ld, nb = (%5li, %5li, %5li) - ", #TENSOR, \
TENSOR->type,TENSOR_TYPE_AS_STR(TENSOR->type),\
TENSOR->ne[0], TENSOR->ne[1], TENSOR->ne[2], TENSOR->nb[0], TENSOR->nb[1], TENSOR->nb[2]); \
{ float sum = tensor_sum_elements(TENSOR); printf("Sum of tensor %s is %6.2f\n",#TENSOR, sum); }
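
The hunk above switches the ne format specifiers from %5d to %5ld because the tensor dimensions are 64-bit, so %d would not match the argument type. A minimal sketch, not part of the commit and assuming ne holds int64_t values (as the switch to %ld implies), of printing them portably with PRId64 so the code also stays correct on platforms where long is 32-bit:

#include <inttypes.h>   // PRId64 for printing 64-bit values portably
#include <stdint.h>
#include <stdio.h>

// Hypothetical helper: print three 64-bit tensor dimensions without relying
// on %ld matching int64_t on every platform.
static void print_ne(const int64_t ne[3]) {
    printf("ne = %5" PRId64 " x %5" PRId64 " x %5" PRId64 "\n", ne[0], ne[1], ne[2]);
}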
@@ -170,12 +171,40 @@ int main(int argc, char ** argv) {
struct ggml_cgraph gf = ggml_build_forward(m11xm2);
gf.n_threads=benchmark_params.n_threads;
printf("cgraph->n_threads=%i\n",gf.n_threads);
fprintf(stderr, "system_info: n_threads = %d | %s\n",
benchmark_params.n_threads, llama_print_system_info());
TENSOR_DUMP(m11);
TENSOR_DUMP(m2);
ggml_graph_compute(ctx, &gf);
{
const int dimx = sizex;
const int dimy = sizey;
const int dimz = sizez;
long long int flops_per_dot_product = dimy + dimy;
long long int flops_per_matrix = flops_per_dot_product * dimx * dimz; ;
printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; FLOPS_per_u_Second\n");
printf("==============================================================================================\n");
for (int i=0;i<benchmark_params.n_iterations ;i++) {
long long int start = ggml_time_us();
//printf("Running ggml_graph_compute\n");
ggml_graph_compute(ctx, &gf);
long long int stop = ggml_time_us();
long long int usec = stop-start;
float sec = usec/1000000;
float flops_per_usec = (1.0f*flops_per_matrix)/usec;
printf("%9i;%8i;%6i;%6i;%6i;%15lli;%18lli;%19.2f\n",
i,
gf.n_threads,
sizex, sizey, sizez, flops_per_matrix,
usec,flops_per_usec);
}
}
TENSOR_DUMP(gf.nodes[0]);
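
The new timing loop above prints a FLOPS estimate per iteration. The arithmetic behind it: each output element of the matrix product costs one multiply and one add per term of its dot product, which is where flops_per_dot_product = dimy + dimy comes from, and summed over all output elements the total comes to roughly 2 * sizex * sizey * sizez operations. A standalone sketch of the same estimate, not part of the commit, with illustrative sizes only:

#include <stdio.h>

int main(void) {
    // Illustrative dimensions only; the benchmark derives these from
    // sizex, sizey and sizez at run time.
    const long long sizex = 11008, sizey = 4096, sizez = 128;
    const long long flops_per_dot_product = 2 * sizey;  // one mul + one add per term
    const long long flops_per_matrix = flops_per_dot_product * sizex * sizez;
    printf("about %6.2f gFLOP per matmul\n", flops_per_matrix / 1e9);
    return 0;
}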
@@ -217,7 +246,7 @@ int main(int argc, char ** argv) {
const int dimz = sizez;
long long int flops_per_dot_product = dimy + dimy;
long long int flops_per_matrix = flops_per_dot_product * dimx * dimz; ;
printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - aboout %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000);
// Let's use the F32 result from above as a reference for the q4_0 multiplication

ggml.c

@@ -437,13 +437,13 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
// AVX routine provided by GH user jon-chuang
// ref: https://github.com/ggerganov/llama.cpp/issues/956#issuecomment-1508090551
#if __AVX2__ || __AVX512F__
#if false && __AVX2__ || __AVX512F__
// Given A = K X M, B = K X N, compute one row of C = A^TB
void ggml_mul_row_f32_tall_skinny(const float * A, const float * B, float * C, int M, int N, int K) {
alignas(32) float res_vec[8];
for (int j = 0; j < N; j += 8) { // Process 8 elements of C's row at a time - 256 / size_of(float)
__m256 c_vec = _mm256_setzero_ps(); // Initialize the result vector to all zeros
for (int k = 0; k < K; ++k) {
__m256 a = _mm256_broadcast_ss(&A[k * M]); // Broadcast the k-th element of the row of A^T
__m256 b_vec = _mm256_load_ps(&B[j + k * N]); // Load the j/8-th segment of the k-th row of B^T (corresponding to the k-th column of B)
@@ -451,7 +451,11 @@ void ggml_mul_row_f32_tall_skinny(const float * A, const float * B, float * C, int M, int N, int K) {
}
// Store the result in the corresponding row of C
_mm256_store_ps(&C[j], c_vec);
_mm256_store_ps(&res_vec, c_vec);
for (int k = 0; k < 8; ++k) {
C[j+k] = res_vec[k];
}
}
// Handle the remainder
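
For reference while reading the AVX2 path above: the routine produces one row of C = A^T B, with A stored as K x M (so the relevant row of A^T is read with stride M) and B stored as K x N, eight output columns per iteration. A plain scalar sketch of the same per-row computation, not part of the commit and shown only to make the indexing explicit:

// Scalar reference: A points at one column of the K x M matrix A
// (stride M between elements), B is K x N, C receives the N-wide output row.
static void mul_row_f32_reference(const float * A, const float * B, float * C,
                                  int M, int N, int K) {
    for (int j = 0; j < N; ++j) {
        float sum = 0.0f;
        for (int k = 0; k < K; ++k) {
            sum += A[k * M] * B[j + k * N];   // k-th term of the dot product
        }
        C[j] = sum;
    }
}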
@@ -6702,7 +6706,9 @@ static void ggml_compute_forward_mul_mat_f32(
assert(ne3 == ne03);
#if defined(__AVX2__) || defined(__AVX__)
if (ne00 <= 32) {
if (ggml_cpu_has_avx2() && ne00 <= 48 || ne00 <= 32) {
// Handle tall and skinny matrices
// TODO(jon-chuang): Also check that we only handle 2D matrices?
assert(ne00 == ne10);
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
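
One note on the widened dispatch check above: && binds tighter than ||, so the new condition takes the tall-and-skinny path when AVX2 is available and ne00 <= 48, or in any case when ne00 <= 32. A small equivalent sketch with the grouping made explicit (illustrative helper, not part of the commit):

#include <stdbool.h>

// Same truth table as the new condition, parenthesised for readability.
static bool use_tall_skinny_path(bool has_avx2, int ne00) {
    return (has_avx2 && ne00 <= 48) || (ne00 <= 32);
}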