merge from upstream

This commit is contained in:
parent c70b5f211b
commit d7d679e41a

4 changed files with 121 additions and 74 deletions
Makefile (13 changed lines)
@@ -744,6 +744,9 @@ clean:
# Helper function that replaces .c, .cpp, and .cu file endings with .o:
GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1))))

# Helper function that replaces .c, .cpp, and .cu file endings with .s:
GET_ASM_FILE = $(patsubst %.c,%.s,$(patsubst %.cpp,%.s,$(patsubst %.cu,%.s,$(1))))

main: examples/main/main.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS)
	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

@@ -751,6 +754,16 @@ main: examples/main/main.cpp ggml.o llama.o $(C
	@echo '==== Run ./main -h for help. ===='
	@echo

bench-phi-knc.s: bench-phi-knc.c
	$(CC) $(CFLAGS) -S $< -o $(call GET_ASM_FILE, $<)

ggml-phi-knc.s: ggml-phi-knc.c
	$(CC) $(CFLAGS) -S $< -o $(call GET_ASM_FILE, $<)

bench-phi-knc: bench-phi-knc.c ggml-phi-knc.o
	$(CC) $(CFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
	$(CC) $(CFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

infill: examples/infill/infill.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS)
	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

ggml-phi-knc.c (164 changed lines)
@@ -4,9 +4,6 @@

#include <stdio.h>

static inline _Bool is_aligned(const void *restrict pointer, size_t byte_count)
{ return (uintptr_t)pointer % byte_count == 0; }

// No, we have an SIMD unit.
// #define GGML_SIMD

@@ -15,102 +12,135 @@ static inline _Bool is_aligned(const void *restrict pointer, size_t byte_count)
// We can fit 16 of these float32s in a single vector register.
#define GGML_F32_EPR 16

// because we are not defining GGML_SIMD, we have to do this ourself.
#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)

// a single vector. 128*32=512
typedef float float32x16_t __attribute__((vector_size (128)));
#define GGML_F32x16 float32x16_t

// from chatGPT. nuke this later.
#include <string.h>
// A forward declaration, to keep GCC happy...
void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);

inline static void GGML_F32x16_VEC_ZERO(float32x16_t *target)
{
    // we only need a mask16, but register sizes...
    __mmask32 mask=0xFFFFFFFF;

    // FIXME: how do we tell GNU AS to perform upconverts?
    // FIXME: how do we tell GNU AS to perform upconverts? Could remove two memory reads here...
    float zero[4] __attribute__((aligned(64))) = {0.0f,0.0f,0.0f,0.0f};

    __asm__ __volatile__ ("movl\t%[M],\t%%eax\n\t"
                          "kmov %%eax,\t%%k1\n\t"
                          "vbroadcastf32x4\t%[Z],\t%%zmm0%{%%k1%}\n\t"
                          "vmovaps\t\t%%zmm0,\t%[RES]%{%%k1%}\n\t"
                          : [RES] "+m" (*target)
                          : [M] "m" (mask),
                            [Z] "m" (zero)
                          : "eax", "k1", "zmm0");
}

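(For reference: the masked vbroadcastf32x4 sequence above just zeroes one 16-float block. A minimal portable sketch of the same semantics, using the float32x16_t typedef from this file, could look like the following; the _ref name is ours and is not part of the commit.)

    // Hypothetical reference version, not in the commit: zero one float32x16_t
    // the portable way, so the hand-written KNC path can be checked against it.
    inline static void GGML_F32x16_VEC_ZERO_ref(float32x16_t *target)
    {
        for (int i = 0; i < GGML_F32_EPR; ++i)
            ((float *)target)[i] = 0.0f;
    }
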
// multiply each item in mvec1 with the corresponding item in mvec2, adding the result to the corresponding item in sum.
inline static void GGML_F32x16_VEC_FMA(const float32x16_t *mvec1, const float32x16_t *mvec2, float32x16_t *sumvec, size_t iterations)
{
    // we only need a mask16, but register sizes...
    __mmask32 mask=0xFFFFFFFF;
    __asm__ __volatile__ (
        "vmovaps\t\t(%[RES]),\t%%zmm0\n\t" // load our initial state..
        "1:\n\t"
        "cmp $0,\t%[ITER]\n\t" // Compare iterations to 0
        "je\t2f\n\t" // Jump to label 2 if zero (end of loop)
        "vmovaps\t\t(%[VEC1]),\t%%zmm1\n\t" // Load two vectors.
        "vmovaps\t\t(%[VEC2]),\t%%zmm2\n\t"
        "vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t" // Perform a fused multiply add.
        "add $64,\t%[VEC1]\n\t" // Move to the next float32x16_t (64 bytes ahead)
        "add $64,\t%[VEC2]\n\t"
        "sub $1,\t%[ITER]\n\t" // Decrement iterations
        "jmp 1b\n\t" // Jump back to the start of the loop
        "2: \n\t" // Label for loop end
        "vmovaps\t\t%%zmm0,\t(%[RES])\n\t" // save our results.
        : [RES] "+r" (sumvec),
          [ITER] "+r" (iterations)
        : [M] "r" (mask),
          [VEC1] "r" (mvec1),
          [VEC2] "r" (mvec2)
        : "zmm0", "zmm1", "zmm2", "cc", "memory");
        "vbroadcastf32x4\t%[Z],\t%%zmm8\n\t" // use an upscaling operator to clear our value.
        "vmovaps\t\t%%zmm8,\t%[RES]\n\t"
        : [RES] "+m" (*target)
        : [Z] "m" (zero)
        : "zmm8");
}

// NOTE: all inputs must be __attribute__((aligned(64)));
float DotProduct_F32(const float * restrict inVec1, const float * restrict inVec2, uint32_t count)
// Multiply each item in mvec1 with the corresponding item in mvec2, adding the result to the corresponding item in sum. optionally clear the sum before starting.
inline static void GGML_F32x16_VEC_FMA(const float32x16_t *mvec1, const float32x16_t *mvec2, float32x16_t *sumvec, size_t iterations, int clear)
{
    // our single result, in the end.
    float sumf = 0.0f;
    // FIXME: how do we tell GNU AS to perform upconverts? Could remove two memory reads here...
    float zero[4] __attribute__((aligned(64))) = {0.0f,0.0f,0.0f,0.0f};

    __asm__ __volatile__ (
        "mov\t%[ITER],%%r8\n\t" // how many register sized chunks are we responsible for
        "mov\t%[VEC1],%%r10\n\t" // where do we start work in mvec1?
        "mov\t%[VEC2],%%r12\n\t" // where do we start work in mvec2?
        "cmp\t$1,%[CLR]\n\t" // should we clear the sum before we start?
        "jne\t4f\n\t"
        "vbroadcastf32x4\t%[Z],\t%%zmm0\n\t" // if so, use an upscaling operator to do it.
        "vprefetchnta\t(%%r10)\n\t"
        "vprefetchnta\t(%%r12)\n\t"
        "vprefetch1\t128(%%r10)\n\t"
        "vprefetch1\t128(%%r12)\n\t"
        "vprefetch1\t256(%%r10)\n\t"
        "vprefetch1\t256(%%r12)\n\t"
        "vprefetch1\t384(%%r10)\n\t"
        "vprefetch1\t384(%%r12)\n\t"
        "vprefetch1\t512(%%r10)\n\t"
        "vprefetch1\t512(%%r12)\n\t"
        "jmp\t1f\n\t"
        "4:\n\t"
        "vprefetch0\t(%[RES])\n\t"
        "vmovaps\t\t(%[RES]),\t%%zmm0\n\t" // otherwise, load our inital state from sum..
        "vprefetchnta\t(%%r10)\n\t"
        "vprefetchnta\t(%%r12)\n\t"
        "1:\n\t"
        "cmp\t$3,\t%%r8\n\t" // Compare iterations to three.
        "jnae\t6f\n\t" // If there are not three iterations left, jump to label 6.
        "vmovaps\t\t(%%r10),\t%%zmm1\n\t" // Load two vectors.
        "vmovaps\t\t(%%r12),\t%%zmm2\n\t"
        "sub\t$3,\t%%r8\n\t" // Decrement iterations
        "vprefetchnta\t192(%%r10)\n\t" // prefetch the next float32x16_t block (192 bytes ahead)
        "vprefetchnta\t192(%%r12)\n\t"
        "vmovaps\t\t64(%%r10),\t%%zmm3\n\t" // Load two vectors.
        "vmovaps\t\t64(%%r12),\t%%zmm4\n\t"
        "vprefetch1\t320(%%r10)\n\t" // prefetch the block after the block after the next float32x16_t block (320 bytes ahead)
        "vprefetch1\t320(%%r12)\n\t"
        "vmovaps\t\t128(%%r10),\t%%zmm5\n\t" // Load two vectors.
        "vmovaps\t\t128(%%r12),\t%%zmm6\n\t"
        "vprefetch1\t576(%%r10)\n\t"
        "vprefetch1\t576(%%r12)\n\t"
        "vprefetch1\t704(%%r10)\n\t"
        "vprefetch1\t704(%%r12)\n\t"
        "add\t$192,\t%%r10\n\t" // Move to the next float32x16_t block (192 bytes ahead)
        "add\t$192,\t%%r12\n\t"
        "vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t" // Perform a fused multiply add
        "vfmadd231ps\t%%zmm3,\t%%zmm4,\t%%zmm0\n\t" // Perform a fused multiply add
        "vfmadd231ps\t%%zmm5,\t%%zmm6,\t%%zmm0\n\t" // Perform a fused multiply add
        "jmp\t1b\n\t" // Jump back to the start of the loop
        "6:\n\t" // we know we are near the tail. handle 2, 1, and 0 cases.
        "cmp\t$0,\t%%r8\n\t" // Compare iterations to zero
        "je\t2f\n\t" // Jump to label 2 if zero (end of loop)
        "cmp\t$1,\t%%r8\n\t" // Compare iterations to one
        "vmovaps\t\t(%%r10),\t%%zmm1\n\t" // Load two vectors.
        "vmovaps\t\t(%%r12),\t%%zmm2\n\t"
        "vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t" // Perform a fused multiply add
        "je\t2f\n\t" // Jump to label 3 if one (end of loop)
        // No compare. we must be two.
        "vmovaps\t\t64(%%r10),\t%%zmm3\n\t" // Load two vectors.
        "vmovaps\t\t64(%%r12),\t%%zmm4\n\t"
        "vfmadd231ps\t%%zmm3,\t%%zmm4,\t%%zmm0\n\t" // Perform a fused multiply add
        "2:\n\t" // Label for loop end
        "vmovaps\t\t%%zmm0,\t(%[RES])\n\t" // save our results.
        : [RES] "+r" (sumvec)
        : [ITER] "r" (iterations),
          [VEC1] "r" (mvec1),
          [VEC2] "r" (mvec2),
          [CLR] "r" (clear),
          [Z] "m" (zero)
        : "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "cc", "memory", "r8", "r10", "r12");
}

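(As a cross-check on the hand-written KNC assembly above, here is a hedged, portable sketch of the semantics GGML_F32x16_VEC_FMA is expected to have: for `iterations` blocks of 16 floats, multiply mvec1 and mvec2 elementwise and accumulate into sumvec, optionally clearing sumvec first. The _ref name is ours and is not part of the commit.)

    // Hypothetical reference implementation, not in the commit.
    inline static void GGML_F32x16_VEC_FMA_ref(const float32x16_t *mvec1, const float32x16_t *mvec2,
                                               float32x16_t *sumvec, size_t iterations, int clear)
    {
        const float *v1  = (const float *)mvec1;
        const float *v2  = (const float *)mvec2;
        float       *sum = (float *)sumvec;
        if (clear == 1)
            for (int i = 0; i < GGML_F32_EPR; ++i)
                sum[i] = 0.0f;
        // One "iteration" covers one 16-float (64-byte) block, matching the assembly's stride.
        for (size_t block = 0; block < iterations; ++block)
            for (int i = 0; i < GGML_F32_EPR; ++i)
                sum[i] += v1[block*GGML_F32_EPR + i] * v2[block*GGML_F32_EPR + i];
    }
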
// NOTE: x and y inputs must be __attribute__((aligned(64)));
void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc)
{
    // our sum.
    float32x16_t sum __attribute__((aligned(64)));

    // the number of vector-sized steps we will need to do.
    const uint32_t np = (count & ~(GGML_F32_EPR - 1));
    const uint32_t np = (n & ~(GGML_F32_EPR - 1));

    GGML_F32x16_VEC_ZERO(&sum);
    GGML_F32x16_VEC_FMA((const float32x16_t *)x, (const float32x16_t *)y, &sum, np/GGML_F32_EPR, 1);

    // 0 indexed cycle count
    // for (uint32_t cycle = 0; cycle < (np/GGML_F32_EPR); ++cycle)
    GGML_F32x16_VEC_FMA((float32x16_t *)inVec1, (float32x16_t *)inVec2, &sum, np/GGML_F32_EPR);

    if (count != np)
    // FIXME: replace this with a final round using masked vectors.
    if ( n - np != 0 )
    {
        printf("handling remainder %u\n",count-np);
        // add the leftovers, that could not be handled by the vector loop.
        // our extended last part of inVec1.
        // our extended last part of x.
        float32x16_t v1 __attribute__((aligned(64)));
        GGML_F32x16_VEC_ZERO(&v1);
        // our extended last part of inVec2.
        // our extended last part of y.
        float32x16_t v2 __attribute__((aligned(64)));
        GGML_F32x16_VEC_ZERO(&v2);

        memcpy(&v1, &inVec1[np], (count - np)*sizeof(float));
        memcpy(&v2, &inVec2[np], (count - np)*sizeof(float));
        memcpy(&v1, &x[np], (n - np)*sizeof(float));
        memcpy(&v2, &y[np], (n - np)*sizeof(float));

        GGML_F32x16_VEC_FMA(&v1,
                            &v2,
                            &sum, 1);
                            &sum, 1, 0);
    }

    // reduce sum0..sumX to sumf
    // reduce sum, and store it in s.
    for (uint32_t i=0; i <GGML_F32_EPR; ++i)
        sumf+=((float *)&sum)[i];
        *s+=((float *)&sum)[i];

    return sumf;
}

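(A small, hedged usage sketch, ours and not part of the commit: exercise the memcpy-padded remainder path by calling ggml_vec_dot_f32 on 64-byte-aligned buffers whose length is not a multiple of GGML_F32_EPR, and compare against a plain scalar dot product. This is roughly what a bench-phi-knc.c style harness could do; that file itself is not shown in this diff.)

    // Hypothetical test harness; assumes a KNC toolchain and the prototype from ggml-phi-knc.h.
    #include <stdio.h>

    void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);

    int main(void)
    {
        enum { N = 37 }; // deliberately not a multiple of 16, to hit the remainder path
        float x[N] __attribute__((aligned(64)));
        float y[N] __attribute__((aligned(64)));
        for (int i = 0; i < N; ++i) { x[i] = (float)i; y[i] = 0.5f; }

        float fast = 0.0f, ref = 0.0f; // note: the final reduction accumulates into *s
        ggml_vec_dot_f32(N, &fast, 0, x, 0, y, 0, 1);
        for (int i = 0; i < N; ++i) ref += x[i] * y[i];

        printf("fast=%f ref=%f\n", fast, ref);
        return 0;
    }
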
ggml-phi-knc.h

@@ -6,11 +6,8 @@
extern "C" {
#endif

#include <stdint.h>

GGML_CALL float DotProduct_F32(const float * restrict vec1, const float * restrict vec2, uint32_t count);
void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);

#ifdef __cplusplus
}
#endif

ggml.c (11 changed lines)
@@ -42,6 +42,7 @@
#pragma warning(disable: 4996)
#endif

// hand assembled replacement functions are cool.
#if defined(__k1om__)
#include <ggml-phi-knc.h>
#endif

@@ -500,7 +501,11 @@ FILE * ggml_fopen(const char * fname, const char * mode) {

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

#if defined(__k1om__)
// We get this function from elsewhere.
#else
static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
#endif
static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc);

static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {

@@ -1502,6 +1507,9 @@ inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x)
inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }

#if defined(__k1om__)
// we get this function from elsewhere.
#else
static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc) {
    assert(nrc == 1);
    UNUSED(nrc);

@@ -1534,8 +1542,6 @@ static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float *
    for (int i = np; i < n; ++i) {
        sumf += x[i]*y[i];
    }
#elif defined(__k1om__)
    float sumf = DotProduct_F32(x, y, n);
#else
    // scalar
    ggml_float sumf = 0.0;

@@ -1546,6 +1552,7 @@ static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float *

    *s = sumf;
}
#endif

static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc) {
    assert(nrc == 1);
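(Taken together, the ggml.c hunks use plain compile-time substitution: when __k1om__ is defined, the static prototype and definition of ggml_vec_dot_f32 are compiled out and the linker resolves the symbol against the hand-written version in ggml-phi-knc.o. A minimal, hedged sketch of the same pattern, with illustrative names that are not from the commit:)

    #if defined(__k1om__)
    // Declaration only; the definition comes from a hand-written object file,
    // in this commit's case ggml-phi-knc.o.
    void my_vec_dot_f32(int n, float *s, const float *x, const float *y);
    #else
    // Generic fallback, compiled only when no hand-written version is available.
    static void my_vec_dot_f32(int n, float *s, const float *x, const float *y)
    {
        float sum = 0.0f;
        for (int i = 0; i < n; ++i) sum += x[i] * y[i];
        *s = sum;
    }
    #endif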