Julia Longtin 2024-05-11 18:50:44 +08:00 committed by GitHub
commit 96b89063e5
10 changed files with 565 additions and 3 deletions

Makefile

@@ -121,6 +121,8 @@ CC := riscv64-unknown-linux-gnu-gcc
CXX := riscv64-unknown-linux-gnu-g++
endif
K1OM := $(shell echo | $(CC) -dM -E - | grep __k1om__)
#
# Compile flags
#
@@ -307,6 +309,10 @@ endif
ifndef RISCV
ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64))
# detect the PHI cross compiler.
ifeq "${K1OM}" ""
# Use all CPU extensions that are available:
MK_CFLAGS += -march=native -mtune=native
HOST_CXXFLAGS += -march=native -mtune=native
@@ -318,6 +324,11 @@ ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64))
# Use SSSE3 only (not SSE3!)
#MK_CFLAGS += -mssse3
#MK_CXXFLAGS += -mssse3
else
OBJS += ggml-phi-knc.o ggml-phi-knc-dot_q5_K_q8_K.o
MK_CFLAGS += -march=knc -mtune=knc
endif
endif
ifneq '' '$(findstring mingw,$(shell $(CC) -dumpmachine))'
@@ -769,6 +780,9 @@ clean:
# Helper function that replaces .c, .cpp, and .cu file endings with .o:
GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1))))
# Helper function that replaces .c, .cpp, and .cu file endings with .s:
GET_ASM_FILE = $(patsubst %.c,%.s,$(patsubst %.cpp,%.s,$(patsubst %.cu,%.s,$(1))))
main: examples/main/main.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -776,6 +790,19 @@ main: examples/main/main.cpp ggml.o llama.o $(C
@echo '==== Run ./main -h for help. ===='
@echo
bench-phi-knc.s: bench-phi-knc.c
$(CC) $(CFLAGS) -S $< -o $(call GET_ASM_FILE, $<)
ggml-phi-knc.s: ggml-phi-knc.c
$(CC) $(CFLAGS) -S $< -o $(call GET_ASM_FILE, $<)
bench-phi-knc: bench-phi-knc.c ggml-phi-knc.o
$(CC) $(CFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CC) $(CFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
ggml-phi-knc-dot_q5_K_q8_K.s: ggml-phi-knc-dot_q5_K_q8_K.c
$(CC) $(CFLAGS) -S $< -o $(call GET_ASM_FILE, $<)
infill: examples/infill/infill.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

bench-phi-knc.c (new file, 64 lines)

@@ -0,0 +1,64 @@
#include <immintrin.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h> /*for CLOCK_REALTIME? */
#include <time.h>
#include "ggml-phi-knc.h"
#define MAXVEC 1024768
#define RUNTOTAL 12
#define RUNS
int main(void)
{
struct timespec start, middle, end;
double vector_time;
double scalar_time;
float scalar = 0.0f;
float vector = 0.0f;
int vecRuns[RUNTOTAL] = {10, 16, 17, 32, 33, 48, 49, 64, 65, 80, 81, 1024768};
for (uint32_t runCount = 0; runCount < RUNTOTAL; ++runCount)
{
// Reset the accumulators, so each run's comparison stands on its own.
scalar = 0.0f;
vector = 0.0f;
// Generate a random input vector of [-1, 1] values.
float vec1[MAXVEC] __attribute__((aligned(64)));
for (int i = 0; i < vecRuns[runCount]; i++)
vec1[i] = 2 * (0.5 - rand() / (float)RAND_MAX);
// Generate a second random input vector of [-1, 1] values.
float vec2[MAXVEC] __attribute__((aligned(64)));
for (int i = 0; i < vecRuns[runCount]; i++)
vec2[i] = 2 * (0.5 - rand() / (float)RAND_MAX);
// on your mark..
clock_gettime(CLOCK_MONOTONIC, &start);
// call dot product
ggml_vec_dot_f32(vecRuns[runCount], &vector, 0, vec1, 0, vec2, 0, 0);
// save the middle point..
clock_gettime(CLOCK_MONOTONIC, &middle);
// do the same work by hand;
for (int i = 0; i < vecRuns[runCount]; ++i)
scalar += vec1[i]*vec2[i];
clock_gettime(CLOCK_MONOTONIC, &end);
printf("vector\tvs\tscalar (%d items)\n", vecRuns[runCount]);
printf("%.9f\tvs\t%.9f\n", vector, scalar);
vector_time = middle.tv_sec - start.tv_sec;
vector_time += (middle.tv_nsec - start.tv_nsec) / 1000000000.0;
scalar_time = end.tv_sec - middle.tv_sec;
scalar_time += (end.tv_nsec - middle.tv_nsec) / 1000000000.0;
printf("%.9f\tvs\t%.9f\n", vector_time, scalar_time);
}
fflush(stdout);
return 0;
}

ggml-phi-knc-dot_q5_K_q8_K.c (new file, 222 lines)

@@ -0,0 +1,222 @@
/* Xeon PHI IMCI support. */
/* formatted by using emacs, with (M-x set-variable RET c-basic-offset RET 4 RET) executed. */
// For uint32_t
#include <stdint.h>
// For size_t
#include <stdio.h>
// For memcpy.
#include <string.h>
// Yes, we have to tell this header to actually export stuff.
#define GGML_COMMON_IMPL_C
#include "ggml-quants.h"
#include "ggml-impl.h"
// For block_q5_K and block_q8_K.
#include "ggml-common.h"
// This SIMD unit can work with 32 float32s at once.
#define GGML_F32_STEP 32
// We can fit 16 of these float32s in a single vector register.
#define GGML_F32_EPR 16
/* We force an alignment, because I haven't written unaligned forms of the assembly functions yet. */
typedef float float32x16_t __attribute__((vector_size (64), aligned(64)));
typedef int8_t int8x16_t __attribute__((vector_size (16), aligned(16)));
typedef uint8_t uint8x16_t __attribute__((vector_size (16), aligned(16)));
typedef int32_t int32x16_t __attribute__((vector_size (64), aligned(64)));
/* A forward declaration, to keep GCC happy. */
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc);
/* clear a vector of 16 floats. */
inline static void GGML_F32x16_VEC_ZERO(float32x16_t *target)
{
uint8_t zero=0;
__asm__ __volatile__ (
"vbroadcastss\t%[Z]%{uint8%},\t%%zmm8\n\t" // use an upscaling operator to clear our register.
"vmovaps\t\t%%zmm8,\t%[RES]\n\t"
: [RES] "+m" (*target)
: [Z] "m" (zero)
: "zmm8", "memory");
}
// This function performs two multiplies of an I8x16 and a U8x16 vector into two I16x16 vectors, then does an FMA on the scaled result of multiplying the two I16x16 vectors, adding the result into an I32x16.
// It logically loops 8 times; in practice four times, with a 2x unroll.
inline static void GGML_8X_2xI8x16_2xI8x16_MUL_2xI16x16_S_FMA_I32x16 (int8x16_t *src11, uint8x16_t *src21, const uint8_t *scale, int32x16_t *res)
{
uint8_t zero = 0;
__asm__ __volatile__ (
"vprefetche0\t(%[SRC11])\n\t"
"vprefetche0\t(%[SRC21])\n\t"
"vprefetche0\t(%[SCALE])\n\t"
"mov\t$0,\t%%ecx\n\t"
"mov\t%[SRC11],\t%%r12\n\t"
"mov\t%[SRC21],\t%%r8\n\t"
"mov\t%[SCALE],\t%%r9\n\t"
"vpbroadcastd\t%[Z]%{uint8%},\t%%zmm7\n\t" // empty our result.
"1:\n\t"
"inc\t%%ecx\n\t" // we are in our loop, increment our counter.
"cmp\t$4,\t%%ecx\n\t" // see if this is our last run-through.
"vmovdqa32\t\t(%%r12)%{sint8%},\t%%zmm0\n\t" // load the item we will be multiplying from. upscale it from int8 to int32.
"vmovdqa32\t\t(%%r8)%{uint8%},\t%%zmm1\n\t" // load the item we will be multiplying with. upscale it from int8 to int32.
"vpmulld\t%%zmm0,\t%%zmm1,\t%%zmm2\n\t" // perform our 64 bit multiply, low side.
"vpbroadcastd\t(%%r9)%{uint8%},\t%%zmm6\n\t" // load the item we will be multiplying by.
"vpmadd231d\t%%zmm2,\t%%zmm6,\t%%zmm7\n\t" // perform our multiply-add.
"vmovdqa32\t\t16(%%r12)%{sint8%},\t%%zmm3\n\t" // load the item we will be multiplying from. upscale it from int8 to int32.
"vmovdqa32\t\t16(%%r8)%{uint8%},\t%%zmm4\n\t" // load the item we will be multiplying with. upscale it from int8 to int32.
"vpmulld\t%%zmm3,\t%%zmm4,\t%%zmm5\n\t" // perform our 64 bit multiply, low side.
"vpmadd231d\t%%zmm5,\t%%zmm6,\t%%zmm7\n\t" // perform our multiply-add.
"vmovdqa32\t\t32(%%r12)%{sint8%},\t%%zmm8\n\t" // load the item we will be multiplying from. upscale it from int8 to int32.
"vmovdqa32\t\t32(%%r8)%{uint8%},\t%%zmm1\n\t" // load the item we will be multiplying with. upscale it from int8 to int32.
"vpmulld\t%%zmm8,\t%%zmm1,\t%%zmm2\n\t" // perform our 64 bit multiply, low side.
"vpbroadcastd\t1(%%r9)%{uint8%},\t%%zmm6\n\t" // load the item we will be multiplying by.
"vpmadd231d\t%%zmm2,\t%%zmm6,\t%%zmm7\n\t" // perform our multiply-add.
"vmovdqa32\t\t48(%%r12)%{sint8%},\t%%zmm3\n\t" // load the item we will be multiplying from. upscale it from int8 to int32.
"vmovdqa32\t\t48(%%r8)%{uint8%},\t%%zmm4\n\t" // load the item we will be multiplying with. upscale it from int8 to int32.
"vpmulld\t%%zmm3,\t%%zmm4,\t%%zmm5\n\t" // perform our 64 bit multiply, low side.
"vpmadd231d\t%%zmm5,\t%%zmm6,\t%%zmm7\n\t" // perform our multiply-add.
"je\t2f\n\t" // if this is the last time through our loop, jump to 2.
"vprefetche0\t64(%%r12)\n\t" // otherwise, prepare for another run-through.
"vprefetche0\t64(%%r8)\n\t"
"vprefetche2\t128(%%r12)\n\t"
"vprefetche2\t128(%%r8)\n\t"
"add\t$64,\t%%r12\n\t"
"add\t$64,\t%%r8\n\t"
"add\t$2,\t%%r9\n\t"
"jmp\t1b\n\t"
"2:\n\t"
"vmovdqa32\t\t%%zmm7,\t(%[RES])\n\t" // save the result.
: [RES] "+r" (res)
: [SRC11] "r" (src11),
[SRC21] "r" (src21),
[SCALE] "r" (scale),
[Z] "m" (zero)
: "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "cc", "ecx", "r8", "r9", "r12", "memory");
}
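// For reference, a plain-C restatement of what the routine above computes, viewing the
// vector arguments as flat byte arrays. This helper is illustrative only (the name is
// made up, and nothing calls it); the assembly path is what actually runs on KNC.
static inline void ggml_phi_knc_fma_scalar_ref (const int8_t *src11, const uint8_t *src21, const uint8_t *scale, int32_t *res)
{
    // 16 vectors of 16 bytes each (256 values); every pair of vectors shares one scale entry.
    for (int lane = 0; lane < 16; ++lane) res[lane] = 0;
    for (int j = 0; j < 16; ++j)
        for (int lane = 0; lane < 16; ++lane)
            res[lane] += (int32_t)scale[j/2] * (int32_t)src11[16*j + lane] * (int32_t)src21[16*j + lane];
}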
// Unpack 256 unsigned 5-bit values into a vector of 8-bit values.
inline static void GGML_5bit_Unpack (const uint8x16_t * q4, const uint8_t * q1, uint8x16_t * dst)
{
uint8_t lowmask = 0x0F;
uint32_t allmask=0xFFFFFFFF;
uint8_t m=1;
uint8_t bit5 = 0x10;
__asm__ __volatile__ (
"vprefetche0\t(%[SRC1])\n\t" // Issue our memory requests first thing.
"vprefetche0\t(%[SRC4])\n\t"
"vprefetche1\t64(%[SRC4])\n\t"
"mov\t%[SRC4],\t%%r12\n\t" // load the address of the head of our 4-bit list.
"mov\t%[DST],\t%%r8\n\t" // load the address of the head of our destination list.
"mov\t$0,%%ecx\n\t" // initialize our counter.
"vmovdqa32\t(%[SRC1])%{uint8%},\t%%zmm6\n\t" // move 16 packed sets of single bits into the lower 8 bits of zmm6.
"vmovdqa32\t16(%[SRC1])%{uint8%},\t%%zmm7\n\t" // move the next 16 packed sets of single bits into the lower 8 bits of zmm7.
"vpbroadcastd\t%[MASK]%{uint8%},\t%%zmm2\n\t " // load our mask.
"vpbroadcastd\t%[BIT5]%{uint8},\t%%zmm9\n\t" // load the bit we want to add (conditionally).
"vpbroadcastd\t%[M]%{uint8%},\t%%zmm8\n\t" // select which bit we want to test for.
"1:\n\t"
"inc\t%%ecx\n\t" // we are in the loop. increment the counter.
"vptestmd\t%%zmm6,\t%%zmm8,\t%%k1\n\t" // perform our test.
"vptestmd\t%%zmm7,\t%%zmm8,\t%%k2\n\t" // perform our test.
"vmovdqa32\t\t(%%r12)%{uint8%},\t%%zmm0\n\t" // load our odd 4 bit sequences. note that it loads two 4 bit sequences into each zmm value.
"vpandd\t%%zmm0,\t%%zmm2,\t%%zmm4\n\t" // apply a mask, storing the low four bits of vector zmm0 into zmm4.
"vpaddd\t%%zmm4,%%zmm9,%%zmm4%{%%k1%}\n\t" // turn on bit 5 for all values that passed the prior test.
"vmovdqa32\t\t%%zmm4%{uint8%},\t(%%r8)\n\t" // save our result.
"vmovdqa32\t\t16(%%r12)%{uint8%},\t%%zmm1\n\t" // load our odd 4 bit sequences. note that it loads two 4 bit sequences into each zmm value.
"vpandd\t%%zmm1,\t%%zmm2,\t%%zmm5\n\t" // apply a mask, storing the next low four bits of vector zmm1 into zmm5.
"vpaddd\t%%zmm5,%%zmm9,%%zmm5%{%%k2%}\n\t" // turn on bit 5 for all values that passed the prior test.
"vmovdqa32\t\t%%zmm5%{uint8%},\t16(%%r8)\n\t" // save our result.
"add\t$32,\t%%r8\n\t"
"cmp\t$4,\t%%ecx\n\t"
"vpslld\t$1,\t%%zmm8,\t%%zmm8\n\t" // select which bit we want to test for.
"vptestmd\t%%zmm6,\t%%zmm8,\t%%k1\n\t" // perform our test.
"vptestmd\t%%zmm7,\t%%zmm8,\t%%k2\n\t" // perform our test.
"vpsrld\t$4,\t%%zmm0,\t%%zmm4\n\t" // load our even 4 bit sequence into zmm4.
"vpaddd\t%%zmm4,%%zmm9,%%zmm4%{%%k1%}\n\t" // turn on bit 5 for all values that passed the prior test.
"vmovdqa32\t\t%%zmm4%{uint8%},\t(%%r8)\n\t" // save our result.
"vpsrld\t$4,\t%%zmm1,\t%%zmm5\n\t" // load our even 4 bit sequence into zmm5.
"vpaddd\t%%zmm5,%%zmm9,%%zmm5%{%%k2%}\n\t" // turn on bit 5 for all values that passed the prior test.
"vmovdqa32\t\t%%zmm5%{uint8%},\t16(%%r8)\n\t" // save our result.
"je\t2f\n\t"
"vpslld\t$1,\t%%zmm8,\t%%zmm8\n\t" // select which bit we want to test for.
"add\t$32,\t%%r12\n\t"
"add\t$32,\t%%r8\n\t"
"jmp\t1b\n\t"
"2:"
: [DST] "+r" (dst)
: [SRC4] "r" (q4),
[SRC1] "r" (q1),
[MASK] "m" (lowmask),
[M] "m" (m),
[ALL] "m" (allmask),
[BIT5] "m" (bit5)
: "zmm0", "zmm1", "zmm2", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "zmm9", "zmm10", "zmm11", "cc", "ecx", "k1", "k2", "r12", "r8", "memory"
);
}
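// For reference, a plain-C sketch of the same unpacking, loosely following the generic Q5_K
// path in ggml-quants.c. Illustrative only (the name is made up, and nothing calls it):
// each output byte is a low or high nibble of q4, plus 16 if the matching bit of q1 is set.
static inline void ggml_phi_knc_5bit_unpack_ref (const uint8_t *q4, const uint8_t *q1, uint8_t *dst)
{
    uint8_t m = 1;
    for (int j = 0; j < 4; ++j)   // 4 passes of 32 input bytes -> 256 output values.
    {
        for (int l = 0; l < 32; ++l) dst[l]      = (q4[l] & 0x0F) + ((q1[l] & m) ? 16 : 0);
        m <<= 1;
        for (int l = 0; l < 32; ++l) dst[32 + l] = (q4[l] >> 4)   + ((q1[l] & m) ? 16 : 0);
        m <<= 1;
        q4 += 32; dst += 64;
    }
}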
// A function for getting the dot product of two vectors, one with 5-bit values and one with 8-bit values.
// Used during inference, if your model prints "llama_model_loader: - type q5_K: XXX tensors", and XXX is not zero. :)
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
/* interpret X and Y as vectors. */
const block_q5_K * restrict x = vx;
const block_q8_K * restrict y = vy;
/* the number of blocks we will process this in. */
const int nb = n / QK_K;
static const uint32_t kmask1 = 0x3f3f3f3f;
static const uint32_t kmask2 = 0x0f0f0f0f;
static const uint32_t kmask3 = 0x03030303;
uint32_t utmp[4];
const uint8_t * scales = (const uint8_t*)&utmp[0];
const uint8_t * mins = (const uint8_t*)&utmp[2];
float32x16_t sums;
// clear sums.
GGML_F32x16_VEC_ZERO(&sums);
float sumf = 0;
for (int i = 0; i < nb; ++i) {
int8x16_t q8copy [QK_K];
int32x16_t aux32;
uint8x16_t q4copyvec [QK_K/32];
uint8x16_t aux8 [QK_K/16];
// Fill in our 8-bit vector from y[]. Required, because there is no good way to align members of y[], and I haven't mastered unaligned assembly yet...
memcpy (q8copy, y[i].qs, QK_K);
// Fill in our 4-bit vector from x[]. Required, because there is no good way to align members of x[], and I haven't mastered unaligned assembly yet...
memcpy (q4copyvec, x[i].qs, QK_K/2);
// combine our 4 and 1 bit vector sets into an 8 bit value.
GGML_5bit_Unpack(q4copyvec, x[i].qh, aux8);
// extract scales and mins..
memcpy(utmp, x[i].scales, 12);
utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
const uint32_t uaux = utmp[1] & kmask1;
utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
utmp[2] = uaux;
utmp[0] &= kmask1;
// FIXME: while comparing FMA output to the original output, the original had an error. hunt it down.
GGML_8X_2xI8x16_2xI8x16_MUL_2xI16x16_S_FMA_I32x16(q8copy, aux8, scales, &aux32);
int sumi = 0;
for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
for (int l = 0; l < GGML_F32_EPR; ++l) ((float *)&sums)[l] += d * ((int32_t *)&aux32)[l];
const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
sumf -= dmin * sumi;
}
for (int l = 0; l < GGML_F32_EPR; ++l) sumf += ((float *)&sums)[l];
*s = sumf;
}
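/* Restating the kernel above per super-block i, in scalar form (for reading only):
 *   sumf += d    * sum over v in [0,256) of scale[v/32] * q5[v] * q8[v]
 *         - dmin * sum over j in [0,16)  of y[i].bsums[j] * mins[j/2]
 * where q5[] are the unpacked 5-bit values, q8[] = y[i].qs,
 * d = GGML_FP16_TO_FP32(x[i].d) * y[i].d, and dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d. */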

ggml-phi-knc-dot_q5_K_q8_K.h (new file, 16 lines)

@@ -0,0 +1,16 @@
// Formatted with: indent -npcs -nlp -i4 -l300
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C"
{
#endif
/* A forward declaration, to keep GCC happy. */
void ggml_vec_dot_q5_K_q8_K(int n, float *restrict s, size_t bs, const void *restrict vx, size_t bx, const void *restrict vy, size_t by, int nrc);
#ifdef __cplusplus
}
#endif

ggml-phi-knc.c (new file, 142 lines)

@@ -0,0 +1,142 @@
/* Xeon PHI IMCI support. */
/* formatted by using emacs, with (M-x set-variable RET c-basic-offset RET 4 RET) executed. */
#include <stdint.h>
// For size_t
#include <stdio.h>
// For memcpy.
#include <string.h>
// We can fit 16 of these float32s in a single vector register.
#define GGML_F32_EPR 16
// A vector of 16 floats.
typedef float float32x16_t __attribute__((vector_size (64), aligned (64)));
// A forward declaration, to keep GCC happy...
void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
inline static void GGML_F32x16_VEC_ZERO(float32x16_t *target)
{
uint8_t zero[4] __attribute__((aligned(64))) = {0,0,0,0};
__asm__ __volatile__ (
"vbroadcastf32x4\t%[Z]%{uint8%},\t%%zmm8\n\t" // use an upscaling operator to clear our value.
"vmovnraps\t\t%%zmm8,\t%[RES]\n\t"
: [RES] "+m" (*target)
: [Z] "m" (zero)
: "zmm8");
}
// Multiply each item in mvec1 by the corresponding item in mvec2, adding the result to the corresponding item in sumvec. Optionally clear the sum before starting.
inline static void GGML_F32x16_VEC_FMA(const float32x16_t *mvec1, const float32x16_t *mvec2, float32x16_t *sumvec, size_t iterations, int clear)
{
uint8_t zero[4] __attribute__((aligned(64))) = {0,0,0,0};
__asm__ __volatile__ (
"mov\t%[ITER],%%r8\n\t" // how many register sized chunks are we responsible for
"mov\t%[VEC1],%%r10\n\t" // where do we start work in mvec1?
"mov\t%[VEC2],%%r12\n\t" // where do we start work in mvec2?
"cmp\t$1,%[CLR]\n\t" // should we clear the sum before we start?
"jne\t4f\n\t"
"vbroadcastf32x4\t%[Z]%{uint8%},\t%%zmm0\n\t" // if so, use an upscaling operator to do it.
"vprefetchnta\t(%%r10)\n\t"
"vprefetchnta\t(%%r12)\n\t"
"vprefetch1\t128(%%r10)\n\t"
"vprefetch1\t128(%%r12)\n\t"
"vprefetch1\t256(%%r10)\n\t"
"vprefetch1\t256(%%r12)\n\t"
"vprefetch1\t384(%%r10)\n\t"
"vprefetch1\t384(%%r12)\n\t"
"vprefetch1\t512(%%r10)\n\t"
"vprefetch1\t512(%%r12)\n\t"
"jmp\t1f\n\t"
"4:\n\t"
"vprefetch0\t(%[RES])\n\t"
"vmovaps\t\t(%[RES]),\t%%zmm0\n\t" // otherwise, load our inital state from sum..
"vprefetchnta\t(%%r10)\n\t"
"vprefetchnta\t(%%r12)\n\t"
"1:\n\t"
"cmp\t$3,\t%%r8\n\t" // Compare iterations to three.
"jnae\t6f\n\t" // If there are not three iterations left, jump to label 6.
"vmovaps\t\t(%%r10),\t%%zmm1\n\t" // Load two vectors.
"vmovaps\t\t(%%r12),\t%%zmm2\n\t"
"sub\t$3,\t%%r8\n\t" // Decrement iterations
"vprefetchnta\t192(%%r10)\n\t" // prefetch the next float32x16_t block (192 bytes ahead)
"vprefetchnta\t192(%%r12)\n\t"
"vmovaps\t\t64(%%r10),\t%%zmm3\n\t" // Load two vectors.
"vmovaps\t\t64(%%r12),\t%%zmm4\n\t"
"vprefetch1\t320(%%r10)\n\t" // prefetch the block after the block after the next float32x16_t block (320 bytes ahead)
"vprefetch1\t320(%%r12)\n\t"
"vmovaps\t\t128(%%r10),\t%%zmm5\n\t" // Load two vectors.
"vmovaps\t\t128(%%r12),\t%%zmm6\n\t"
"vprefetch1\t576(%%r10)\n\t"
"vprefetch1\t576(%%r12)\n\t"
"vprefetch1\t704(%%r10)\n\t"
"vprefetch1\t704(%%r12)\n\t"
"add\t$192,\t%%r10\n\t" // Move to the next float32x16_t block (192 bytes ahead)
"add\t$192,\t%%r12\n\t"
"vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t" // Perform a fused multiply add
"vfmadd231ps\t%%zmm3,\t%%zmm4,\t%%zmm0\n\t" // Perform a fused multiply add
"vfmadd231ps\t%%zmm5,\t%%zmm6,\t%%zmm0\n\t" // Perform a fused multiply add
"jmp\t1b\n\t" // Jump back to the start of the loop
"6:\n\t" // we know we are near the tail. handle 2, 1, and 0 cases.
"cmp\t$0,\t%%r8\n\t" // Compare iterations to zero
"je\t2f\n\t" // Jump to label 2 if zero (end of loop)
"cmp\t$1,\t%%r8\n\t" // Compare iterations to one
"vmovaps\t\t(%%r10),\t%%zmm1\n\t" // Load two vectors.
"vmovaps\t\t(%%r12),\t%%zmm2\n\t"
"vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t" // Perform a fused multiply add
"je\t2f\n\t" // Jump to label 3 if one (end of loop)
// No compare. we must be two.
"vmovaps\t\t64(%%r10),\t%%zmm3\n\t" // Load two vectors.
"vmovaps\t\t64(%%r12),\t%%zmm4\n\t"
"vfmadd231ps\t%%zmm3,\t%%zmm4,\t%%zmm0\n\t" // Perform a fused multiply add
"2:\n\t" // Label for loop end
"vmovnraps\t\t%%zmm0,\t(%[RES])\n\t" // save our results.
: [RES] "+r" (sumvec)
: [ITER] "r" (iterations),
[VEC1] "r" (mvec1),
[VEC2] "r" (mvec2),
[CLR] "r" (clear),
[Z] "m" (zero)
: "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "cc", "memory", "r8", "r10", "r12");
}
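// A plain-C sketch of what the assembly above computes (illustrative only; the name is made
// up and nothing calls it). Note that the hardware FMA rounds once per multiply-add, so the
// last bits of the result can differ from this scalar form.
inline static void GGML_F32x16_VEC_FMA_ref(const float32x16_t *mvec1, const float32x16_t *mvec2, float32x16_t *sumvec, size_t iterations, int clear)
{
    float *sum = (float *)sumvec;
    if (clear == 1)
        for (int lane = 0; lane < GGML_F32_EPR; ++lane) sum[lane] = 0.0f;
    for (size_t i = 0; i < iterations; ++i)
        for (int lane = 0; lane < GGML_F32_EPR; ++lane)
            sum[lane] += ((const float *)(mvec1 + i))[lane] * ((const float *)(mvec2 + i))[lane];
}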
// NOTE: x and y inputs must be __attribute__((aligned(64)));
void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc)
{
// our sum.
float32x16_t sum;
// the number of vector-sized steps we will need to do.
const uint32_t np = (n & ~(GGML_F32_EPR - 1));
GGML_F32x16_VEC_FMA((const float32x16_t *)x, (const float32x16_t *)y, &sum, np/GGML_F32_EPR, 1);
// add the leftovers, that could not be handled by the vector loop.
if ( n - np != 0 )
{
// our extended last part of x.
float32x16_t v1;
GGML_F32x16_VEC_ZERO(&v1);
// our extended last part of y.
float32x16_t v2;
GGML_F32x16_VEC_ZERO(&v2);
memcpy(&v1, &x[np], (n - np)*sizeof(float));
memcpy(&v2, &y[np], (n - np)*sizeof(float));
GGML_F32x16_VEC_FMA(&v1,
&v2,
&sum, 1, 0);
}
// Reduce the 16 partial sums and store the result in s. Assign rather than accumulate, to match the scalar ggml_vec_dot_f32 this replaces.
float sumf = 0.0f;
for (uint32_t i = 0; i < GGML_F32_EPR; ++i)
sumf += ((float *)&sum)[i];
*s = sumf;
}

ggml-phi-knc.h (new file, 16 lines)

@@ -0,0 +1,16 @@
// Formatted with: indent -npcs -nlp -i4 -l300
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C"
{
#endif
/* A forward declaration, to keep GCC happy. */
void ggml_vec_dot_f32(int n, float *restrict s, size_t bs, const float *restrict x, size_t bx, const float *restrict y, size_t by, int nrc);
#ifdef __cplusplus
}
#endif

ggml-quants.c

@@ -4,6 +4,7 @@
#include "ggml-quants.h"
#include "ggml-impl.h"
// FIXME: why do we import this twice?
#define GGML_COMMON_IMPL_C
#include "ggml-common.h"
@@ -14,6 +15,52 @@
#include <stdlib.h> // for qsort
#include <stdio.h> // for GGML_ASSERT
#ifdef __ARM_NEON
// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>
#else
#ifdef __wasm_simd128__
#include <wasm_simd128.h>
#else
#if defined(__POWER9_VECTOR__) || defined(__powerpc64__)
#include <altivec.h>
#undef bool
#define bool _Bool
#else
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <intrin.h>
#else
#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
#if !defined(__riscv)
#include <immintrin.h>
#endif
#endif
#endif
#endif
#endif
#endif
#ifdef __riscv_v_intrinsic
#include <riscv_vector.h>
#endif
// hand assembled replacement functions are cool.
#if defined(__k1om__)
#include <ggml-phi-knc-dot_q5_K_q8_K.h>
#endif
#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define UNUSED GGML_UNUSED
// some compilers don't provide _mm256_set_m128i, e.g. gcc 7
@@ -6860,6 +6907,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
}
#endif
#if defined(__k1om__)
/* We get this from elsewhere. */
#else
#if QK_K == 256
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
assert(n % QK_K == 0);
@@ -7284,7 +7334,7 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
#endif
}
#else
#else /* QK_K != 256 */
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
assert(n % QK_K == 0);
@@ -7553,8 +7603,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
*s = sumf;
#endif
}
#endif
#endif /* end QK_K != 256 */
#endif /* defined(__k1om__) */
#if QK_K == 256
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {

ggml.c

@@ -47,6 +47,11 @@
#pragma warning(disable: 4996)
#endif
// hand assembled replacement functions are cool.
#if defined(__k1om__)
#include <ggml-phi-knc.h>
#endif
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
@@ -554,7 +559,11 @@ FILE * ggml_fopen(const char * fname, const char * mode) {
static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
#if defined(__k1om__)
// We get this function from elsewhere.
#else
static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
#endif
static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc);
static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc);
@@ -1559,6 +1568,9 @@ inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x)
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
#if defined(__k1om__)
// we get this function from elsewhere.
#else
static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc) {
assert(nrc == 1);
UNUSED(nrc);
@@ -1601,6 +1613,7 @@ static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float *
*s = sumf;
}
#endif
static void ggml_vec_dot_bf16(int n, float * restrict s, size_t bs, ggml_bf16_t * restrict x, size_t bx, ggml_bf16_t * restrict y, size_t by, int nrc) {
assert(nrc == 1);
@@ -2522,9 +2535,10 @@ void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
# if !defined(SYS_getcpu) && defined(SYS_get_cpu)
# define SYS_getcpu SYS_get_cpu // some older glibc versions use this name
# endif
# if defined(SYS_getcpu)
getcpu_ret = syscall(SYS_getcpu, &current_cpu, &g_state.numa.current_node);
#endif
#endif
if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) {
g_state.numa.n_nodes = 0;
return;
@@ -22940,4 +22954,12 @@ int ggml_cpu_has_matmul_int8(void) {
#endif
}
int ggml_cpu_is_xeonphi_knc(void) {
#if defined(__k1om__)
return 1;
#else
return 0;
#endif
}
////////////////////////////////////////////////////////////////////////////////

ggml.h

@@ -2385,6 +2385,7 @@ extern "C" {
GGML_API int ggml_cpu_has_sycl (void);
GGML_API int ggml_cpu_has_vsx (void);
GGML_API int ggml_cpu_has_matmul_int8(void);
GGML_API int ggml_cpu_is_xeonphi_knc (void);
//
// Internal types and functions exposed for tests and benchmarks

llama.cpp

@@ -17985,6 +17985,7 @@ const char * llama_print_system_info(void) {
s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
s += "XEONPHI_KNC = " + std::to_string(ggml_cpu_is_xeonphi_knc()) + " | ";
#ifdef GGML_USE_LLAMAFILE
s += "LLAMAFILE = 1 | ";
#else