Merge pull request #1 from julialongtin/k1om

K1om initial support. Round 1.

Commit: cb4422625a
8 changed files with 511 additions and 2 deletions
Makefile (16 changes)

@@ -291,6 +291,9 @@ ifeq "${K1OM}" ""
 # Use SSSE3 only (not SSE3!)
 #MK_CFLAGS += -mssse3
 #MK_CXXFLAGS += -mssse3
+else
+OBJS += ggml-phi-knc.o ggml-phi-knc-dot_q5_K_q8_K.o
+MK_CFLAGS += -march=knc -mtune=knc
 endif

 endif
@@ -688,6 +691,9 @@ clean:
 # Helper function that replaces .c, .cpp, and .cu file endings with .o:
 GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1))))

+# Helper function that replaces .c, .cpp, and .cu file endings with .s:
+GET_ASM_FILE = $(patsubst %.c,%.s,$(patsubst %.cpp,%.s,$(patsubst %.cu,%.s,$(1))))
+
 main: examples/main/main.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -695,6 +701,16 @@ main: examples/main/main.cpp ggml.o llama.o $(C
 	@echo '==== Run ./main -h for help. ===='
 	@echo

+bench-phi-knc.s: bench-phi-knc.c
+	$(CC) $(CFLAGS) -S $< -o $(call GET_ASM_FILE, $<)
+
+ggml-phi-knc.s: ggml-phi-knc.c
+	$(CC) $(CFLAGS) -S $< -o $(call GET_ASM_FILE, $<)
+
+bench-phi-knc: bench-phi-knc.c ggml-phi-knc.o
+	$(CC) $(CFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CC) $(CFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
 infill: examples/infill/infill.cpp ggml.o llama.o $(COMMON_DEPS) console.o grammar-parser.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
bench-phi-knc.c (64 lines, new file)

@@ -0,0 +1,64 @@
#include <immintrin.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h> /* for CLOCK_REALTIME? (the CLOCK_* constants actually come from <time.h>) */
#include <time.h>

#include "ggml-phi-knc.h"

#define MAXVEC 1024768
#define RUNTOTAL 12
#define RUNS

int main(void)
{
    struct timespec start, middle, end;
    double vector_time;
    double scalar_time;
    float scalar = 0.0f;
    float vector = 0.0f;
    int vecRuns[RUNTOTAL] = {10, 16, 17, 32, 33, 48, 49, 64, 65, 80, 81, 1024768};

    for (uint32_t runCount = 0; runCount < RUNTOTAL; ++runCount)
    {
        // Generate a random input vector of [-1, 1] values. Note: roughly a 4 MB stack buffer.
        float vec1[MAXVEC] __attribute__((aligned(64)));
        for (int i = 0; i < vecRuns[runCount]; i++)
            vec1[i] = 2 * (0.5 - rand() / (float)RAND_MAX);

        // Generate a second random input vector of [-1, 1] values.
        float vec2[MAXVEC] __attribute__((aligned(64)));
        for (int i = 0; i < vecRuns[runCount]; i++)
            vec2[i] = 2 * (0.5 - rand() / (float)RAND_MAX);

        // on your mark..
        clock_gettime(CLOCK_MONOTONIC, &start);

        // call dot product
        ggml_vec_dot_f32(vecRuns[runCount], &vector, 0, vec1, 0, vec2, 0, 0);

        // save the middle point..
        clock_gettime(CLOCK_MONOTONIC, &middle);

        // do the same work by hand; reset the scalar accumulator first, so the two results stay comparable.
        scalar = 0.0f;
        for (int i = 0; i < vecRuns[runCount]; ++i)
            scalar += vec1[i]*vec2[i];

        clock_gettime(CLOCK_MONOTONIC, &end);

        printf("vector\tvs\tscalar (%d items)\n", vecRuns[runCount]);
        printf("%.9f\tvs\t%.9f\n", vector, scalar);

        vector_time = middle.tv_sec - start.tv_sec;
        vector_time += (middle.tv_nsec - start.tv_nsec) / 1000000000.0;

        scalar_time = end.tv_sec - middle.tv_sec;
        scalar_time += (end.tv_nsec - middle.tv_nsec) / 1000000000.0;

        printf("%.9f\tvs\t%.9f\n", vector_time, scalar_time);
    }

    fflush(stdout);

    return 0;
}
ggml-phi-knc-dot_q5_K_q8_K.c (235 lines, new file)

@@ -0,0 +1,235 @@
// For uint32_t
#include <stdint.h>

// For size_t
#include <stdio.h>

// For memcpy
#include <string.h>

// Yes, we have to tell this header to actually export stuff.
#define GGML_COMMON_IMPL_C
#include "ggml-common.h"
#include "ggml-quants.h"
#include "ggml-impl.h"

// FIXME: why do we have to import this twice?
#define GGML_COMMON_IMPL_C
// For block_q5_K and block_q8_K. Only given the second time.
#include "ggml-common.h"

// This SIMD unit can work with 32 float32s at once.
#define GGML_F32_STEP 32
// We can fit 16 of these float32s in a single vector register.
#define GGML_F32_EPR 16

/* We force an alignment, because I haven't written unaligned forms of the assembly functions yet. */
typedef float float32x16_t __attribute__((vector_size (64), aligned(64)));
typedef int8_t int8x16_t __attribute__((vector_size (16), aligned(16)));
typedef uint8_t uint8x16_t __attribute__((vector_size (16), aligned(16)));
typedef int32_t int32x16_t __attribute__((vector_size (64), aligned(64)));

/* A forward declaration, to keep GCC happy. */
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc);

/* Clear a vector of 16 floats. */
inline static void GGML_F32x16_VEC_ZERO(float32x16_t *target)
{
    uint8_t zero = 0;

    __asm__ __volatile__ (
        "vbroadcastss\t%[Z]%{uint8%},\t%%zmm8\n\t" // use an upscaling operator to clear our register.
        "vmovaps\t\t%%zmm8,\t%[RES]\n\t"
        : [RES] "+m" (*target)
        : [Z]   "m"  (zero)
        : "zmm8", "memory");
}
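
/* For reference (an illustrative sketch, not part of this patch): in plain C
   the routine above is just
       memset(target, 0, sizeof(*target));
   done as a broadcast-from-zero plus one aligned 64-byte store. */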

// This function performs two multiplies of an I8x16 and a U8x16 vector into two I16x16 vectors, then does an FMA on the scaled result of multiplying the two I16x16 vectors, adding the result into an I32x16.
// It loops 8 times. Well, actually four, with a 2x unroll.
inline static void GGML_8X_2xI8x16_2xI8x16_MUL_2xI16x16_S_FMA_I32x16 (int8x16_t *src11, uint8x16_t *src21, const uint8_t *scale, int32x16_t *res)
{
    uint8_t zero = 0;

    __asm__ __volatile__ (
        "vprefetche0\t(%[SRC11])\n\t"
        "vprefetche0\t(%[SRC21])\n\t"
        "vprefetche0\t(%[SCALE])\n\t"
        "mov\t$0,\t%%ecx\n\t"
        "mov\t%[SRC11],\t%%r12\n\t"
        "mov\t%[SRC21],\t%%r8\n\t"
        "mov\t%[SCALE],\t%%r9\n\t"
        "vpbroadcastd\t%[Z]%{uint8%},\t%%zmm7\n\t"     // empty our result.
        "1:\n\t"
        "inc\t%%ecx\n\t"                               // we are in our loop, increment our counter.
        "cmp\t$4,\t%%ecx\n\t"                          // see if this is our last run-through.
        "vmovdqa32\t\t(%%r12)%{sint8%},\t%%zmm0\n\t"   // load the item we will be multiplying from. upscale it from int8 to int32.
        "vmovdqa32\t\t(%%r8)%{uint8%},\t%%zmm1\n\t"    // load the item we will be multiplying with. upscale it from uint8 to int32.
        "vpmulld\t%%zmm0,\t%%zmm1,\t%%zmm2\n\t"        // perform our 32 bit multiply, low side.
        "vpbroadcastd\t(%%r9)%{uint8%},\t%%zmm6\n\t"   // load the scale we will be multiplying by.
        "vpmadd231d\t%%zmm2,\t%%zmm6,\t%%zmm7\n\t"     // perform our multiply-add.
        "vmovdqa32\t\t16(%%r12)%{sint8%},\t%%zmm3\n\t" // load the item we will be multiplying from. upscale it from int8 to int32.
        "vmovdqa32\t\t16(%%r8)%{uint8%},\t%%zmm4\n\t"  // load the item we will be multiplying with. upscale it from uint8 to int32.
        "vpmulld\t%%zmm3,\t%%zmm4,\t%%zmm5\n\t"        // perform our 32 bit multiply, low side.
        "vpmadd231d\t%%zmm5,\t%%zmm6,\t%%zmm7\n\t"     // perform our multiply-add.
        "vmovdqa32\t\t32(%%r12)%{sint8%},\t%%zmm8\n\t" // load the item we will be multiplying from. upscale it from int8 to int32.
        "vmovdqa32\t\t32(%%r8)%{uint8%},\t%%zmm1\n\t"  // load the item we will be multiplying with. upscale it from uint8 to int32.
        "vpmulld\t%%zmm8,\t%%zmm1,\t%%zmm2\n\t"        // perform our 32 bit multiply, low side.
        "vpbroadcastd\t1(%%r9)%{uint8%},\t%%zmm6\n\t"  // load the next scale we will be multiplying by.
        "vpmadd231d\t%%zmm2,\t%%zmm6,\t%%zmm7\n\t"     // perform our multiply-add.
        "vmovdqa32\t\t48(%%r12)%{sint8%},\t%%zmm3\n\t" // load the item we will be multiplying from. upscale it from int8 to int32.
        "vmovdqa32\t\t48(%%r8)%{uint8%},\t%%zmm4\n\t"  // load the item we will be multiplying with. upscale it from uint8 to int32.
        "vpmulld\t%%zmm3,\t%%zmm4,\t%%zmm5\n\t"        // perform our 32 bit multiply, low side.
        "vpmadd231d\t%%zmm5,\t%%zmm6,\t%%zmm7\n\t"     // perform our multiply-add.
        "je\t2f\n\t"                                   // if this is the last time through our loop, jump to 2.
        "vprefetche0\t64(%%r12)\n\t"                   // otherwise, prepare for another run-through.
        "vprefetche0\t64(%%r8)\n\t"
        "vprefetche2\t128(%%r12)\n\t"
        "vprefetche2\t128(%%r8)\n\t"
        "add\t$64,\t%%r12\n\t"
        "add\t$64,\t%%r8\n\t"
        "add\t$2,\t%%r9\n\t"
        "jmp\t1b\n\t"
        "2:\n\t"
        "vmovdqa32\t\t%%zmm7,\t(%[RES])\n\t"           // save the result.
        : [RES]   "+r" (res)
        : [SRC11] "r"  (src11),
          [SRC21] "r"  (src21),
          [SCALE] "r"  (scale),
          [Z]     "m"  (zero)
        : "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "ecx", "r8", "r9", "r12", "memory");
}
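
/* An illustrative plain-C model of what the routine above computes
   (hypothetical helper, an editorial sketch only, not part of this patch):
   each of the 8 scale groups covers 32 byte pairs, and each product lands in
   the int32 lane given by its offset within its 16-byte chunk. */
static void i8_dot_scaled_model(const int8_t x[256], const uint8_t y[256], const uint8_t scale[8], int32_t res[16])
{
    for (int lane = 0; lane < 16; ++lane) res[lane] = 0;
    for (int g = 0; g < 8; ++g)          // one scale byte per 32 values
        for (int j = 0; j < 32; ++j)
            res[j % 16] += (int32_t)scale[g] * x[g*32 + j] * y[g*32 + j];
}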

// Unpack 256 unsigned 5 bit values into an 8 bit vector.
inline static void GGML_5bit_Unpack (const uint8x16_t * q4, const uint8_t * q1, uint8x16_t * dst)
{
    uint8_t lowmask = 0x0F;
    uint32_t allmask = 0xFFFFFFFF; // note: passed in below, but unused in the template.
    uint8_t m = 1;
    uint8_t bit5 = 0x10;

    __asm__ __volatile__ (
        "vprefetche0\t(%[SRC1])\n\t"
        "vprefetche0\t(%[SRC4])\n\t"
        "vprefetche1\t64(%[SRC4])\n\t"
        "mov\t%[SRC4],\t%%r12\n\t"                     // load the address of the head of our 4-bit list.
        "mov\t%[DST],\t%%r8\n\t"                       // load the address of the head of our destination list.
        "mov\t$0,%%ecx\n\t"                            // initialize our counter.
        "vmovdqa32\t(%[SRC1])%{uint8%},\t%%zmm6\n\t"   // move 16 packed sets of single bits into the lower 8 bits of zmm6.
        "vmovdqa32\t16(%[SRC1])%{uint8%},\t%%zmm7\n\t" // move the next 16 packed sets of single bits into the lower 8 bits of zmm7.
        "vpbroadcastd\t%[MASK]%{uint8%},\t%%zmm2\n\t"  // load our mask.
        "vpbroadcastd\t%[BIT5]%{uint8%},\t%%zmm9\n\t"  // load the bit we want to add (conditionally).
        "vpbroadcastd\t%[M]%{uint8%},\t%%zmm8\n\t"     // select which bit we want to test for.

        "1:\n\t"
        "inc\t%%ecx\n\t"                               // we are in the loop. increment the counter.

        "vptestmd\t%%zmm6,\t%%zmm8,\t%%k1\n\t"         // perform our test.
        "vptestmd\t%%zmm7,\t%%zmm8,\t%%k2\n\t"         // perform our test.
        "vmovdqa32\t\t(%%r12)%{uint8%},\t%%zmm0\n\t"   // load our odd 4 bit sequences. note that it loads two 4 bit sequences into each zmm value.
        "vpandd\t%%zmm0,\t%%zmm2,\t%%zmm4\n\t"         // apply a mask, storing the low four bits of vector zmm0 into zmm4.
        "vpaddd\t%%zmm4,%%zmm9,%%zmm4%{%%k1%}\n\t"     // turn on bit 5 for all values that passed the prior test.
        "vmovdqa32\t\t%%zmm4%{uint8%},\t(%%r8)\n\t"    // save our result.
        "vmovdqa32\t\t16(%%r12)%{uint8%},\t%%zmm1\n\t" // load our odd 4 bit sequences. note that it loads two 4 bit sequences into each zmm value.
        "vpandd\t%%zmm1,\t%%zmm2,\t%%zmm5\n\t"         // apply a mask, storing the next low four bits of vector zmm1 into zmm5.
        "vpaddd\t%%zmm5,%%zmm9,%%zmm5%{%%k2%}\n\t"     // turn on bit 5 for all values that passed the prior test.
        "vmovdqa32\t\t%%zmm5%{uint8%},\t16(%%r8)\n\t"  // save our result.

        "add\t$32,\t%%r8\n\t"
        "cmp\t$4,\t%%ecx\n\t"
        "vpslld\t$1,\t%%zmm8,\t%%zmm8\n\t"             // shift to the next bit we want to test for.

        "vptestmd\t%%zmm6,\t%%zmm8,\t%%k1\n\t"         // perform our test.
        "vptestmd\t%%zmm7,\t%%zmm8,\t%%k2\n\t"         // perform our test.
        "vpsrld\t$4,\t%%zmm0,\t%%zmm4\n\t"             // load our even 4 bit sequence into zmm4.
        "vpaddd\t%%zmm4,%%zmm9,%%zmm4%{%%k1%}\n\t"     // turn on bit 5 for all values that passed the prior test.
        "vmovdqa32\t\t%%zmm4%{uint8%},\t(%%r8)\n\t"    // save our result.
        "vpsrld\t$4,\t%%zmm1,\t%%zmm5\n\t"             // load our even 4 bit sequence into zmm5.
        "vpaddd\t%%zmm5,%%zmm9,%%zmm5%{%%k2%}\n\t"     // turn on bit 5 for all values that passed the prior test.
        "vmovdqa32\t\t%%zmm5%{uint8%},\t16(%%r8)\n\t"  // save our result.

        "je\t2f\n\t"

        "vpslld\t$1,\t%%zmm8,\t%%zmm8\n\t"             // shift to the next bit we want to test for.
        "add\t$32,\t%%r12\n\t"
        "add\t$32,\t%%r8\n\t"
        "jmp\t1b\n\t"
        "2:"
        : [DST]  "+r" (dst)
        : [SRC4] "r"  (q4),
          [SRC1] "r"  (q1),
          [MASK] "m"  (lowmask),
          [M]    "m"  (m),
          [ALL]  "m"  (allmask),
          [BIT5] "m"  (bit5)
        : "zmm0", "zmm1", "zmm2", "zmm4", "zmm5", "zmm6", "zmm7", "zmm8", "zmm9", "zmm10", "zmm11", "ecx", "k1", "k2", "r12", "r8", "memory"
        );
}
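
/* An illustrative plain-C model of the unpack above (hypothetical helper, an
   editorial sketch only, not part of this patch): q4 holds 128 bytes of
   packed nibbles, q1 holds the 256 fifth bits (bits 2k and 2k+1 of q1[i]
   belong to chunk k), and dst receives 256 bytes of 5-bit values. */
static void unpack_5bit_model(const uint8_t q4[128], const uint8_t q1[32], uint8_t dst[256])
{
    for (int k = 0; k < 4; ++k)          // four 32-byte chunks of q4
        for (int i = 0; i < 32; ++i) {
            uint8_t lo = q4[k*32 + i] & 0x0F;   // odd nibble
            uint8_t hi = q4[k*32 + i] >> 4;     // even nibble
            dst[k*64 + i]      = lo | (((q1[i] >> (2*k))     & 1) << 4);
            dst[k*64 + 32 + i] = hi | (((q1[i] >> (2*k + 1)) & 1) << 4);
        }
}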

// A function for getting the dot product of two vectors, one of 5 bit resolution, and one of 8.
// Used during inference, if your model prints "llama_model_loader: - type q5_K: XXX tensors", and XXX is not zero. :)
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {

    /* interpret X and Y as vectors. */
    const block_q5_K * restrict x = vx;
    const block_q8_K * restrict y = vy;

    /* the number of blocks we will process this in. */
    const int nb = n / QK_K;

    static const uint32_t kmask1 = 0x3f3f3f3f;
    static const uint32_t kmask2 = 0x0f0f0f0f;
    static const uint32_t kmask3 = 0x03030303;

    uint32_t utmp[4];

    const uint8_t * scales = (const uint8_t*)&utmp[0];
    const uint8_t * mins = (const uint8_t*)&utmp[2];

    float32x16_t sums;

    // clear sums.
    GGML_F32x16_VEC_ZERO(&sums);

    float sumf = 0;

    for (int i = 0; i < nb; ++i) {
        int8x16_t q8copy [QK_K/16];      // QK_K bytes, as sixteen int8x16_t.
        int32x16_t aux32;
        uint8x16_t q4copyvec [QK_K/32];
        uint8x16_t aux8 [QK_K/16];

        // Fill in our 8 bit vector from y[]. Required, because there is no good way to align members of y[], and I haven't mastered unaligned assembly yet...
        memcpy (q8copy, y[i].qs, QK_K);

        // Fill in our 4 bit vector from x[]. Required, because there is no good way to align members of x[], and I haven't mastered unaligned assembly yet...
        memcpy (q4copyvec, x[i].qs, QK_K/2);

        // combine our 4 and 1 bit vector sets into an 8 bit value.
        GGML_5bit_Unpack(q4copyvec, x[i].qh, aux8);

        // extract scales and mins..
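        // (Editorial note: x[i].scales packs eight 6-bit scales and eight
        // 6-bit mins into 12 bytes; the mask-and-shift shuffle below rewrites
        // them into utmp so that scales[0..7] and mins[0..7] can be read as
        // plain bytes.)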
        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        int sumi = 0;

        GGML_I32x16_VEC_ZERO(&aux32);

        // FIXME: while comparing FMA output to the original output, the original had an error. hunt it down.
        GGML_8X_2xI8x16_2xI8x16_MUL_2xI16x16_S_FMA_I32x16(q8copy, aux8, scales, &aux32);

        for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];

        const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
        for (int l = 0; l < 16; ++l) ((float *)&sums)[l] += d * ((int32_t *)&aux32)[l];
        const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
        sumf -= dmin * sumi;
    }

    for (int l = 0; l < 16; ++l) sumf += ((float *)&sums)[l];
    *s = sumf;
}
ggml-phi-knc-dot_q5_K_q8_K.h (14 lines, new file)

@@ -0,0 +1,14 @@
#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

/* A forward declaration, to keep GCC happy. */
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc);

#ifdef __cplusplus
}
#endif
ggml-phi-knc.c (143 lines, new file)

@@ -0,0 +1,143 @@
#include <stdint.h>

// For size_t
#include <stdio.h>

// For memcpy.
#include <string.h>

// This SIMD unit can work with 32 float32s at once.
#define GGML_F32_STEP 32
// We can fit 16 of these float32s in a single vector register.
#define GGML_F32_EPR 16

// A single vector register: 16 float32s, 16*32 = 512 bits, i.e. 64 bytes.
typedef float float32x16_t __attribute__((vector_size (64)));
#define GGML_F32x16 float32x16_t

// A forward declaration, to keep GCC happy...
void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);

inline static void GGML_F32x16_VEC_ZERO(float32x16_t *target)
{
    uint8_t zero[4] __attribute__((aligned(64))) = {0,0,0,0};

    __asm__ __volatile__ (
        "vbroadcastf32x4\t%[Z]%{uint8%},\t%%zmm8\n\t" // use an upscaling operator to clear our value.
        "vmovnraps\t\t%%zmm8,\t%[RES]\n\t"
        : [RES] "+m" (*target)
        : [Z]   "m"  (zero)
        : "zmm8");
}

// Multiply each item in mvec1 with the corresponding item in mvec2, adding the result to the corresponding item in sum. Optionally clear the sum before starting.
inline static void GGML_F32x16_VEC_FMA(const float32x16_t *mvec1, const float32x16_t *mvec2, float32x16_t *sumvec, size_t iterations, int clear)
{
    uint8_t zero[4] __attribute__((aligned(64))) = {0,0,0,0};

    __asm__ __volatile__ (
        "mov\t%[ITER],%%r8\n\t"                       // how many register sized chunks are we responsible for?
        "mov\t%[VEC1],%%r10\n\t"                      // where do we start work in mvec1?
        "mov\t%[VEC2],%%r12\n\t"                      // where do we start work in mvec2?
        "cmp\t$1,%[CLR]\n\t"                          // should we clear the sum before we start?
        "jne\t4f\n\t"
        "vbroadcastf32x4\t%[Z]%{uint8%},\t%%zmm0\n\t" // if so, use an upscaling operator to do it.
        "vprefetchnta\t(%%r10)\n\t"
        "vprefetchnta\t(%%r12)\n\t"
        "vprefetch1\t128(%%r10)\n\t"
        "vprefetch1\t128(%%r12)\n\t"
        "vprefetch1\t256(%%r10)\n\t"
        "vprefetch1\t256(%%r12)\n\t"
        "vprefetch1\t384(%%r10)\n\t"
        "vprefetch1\t384(%%r12)\n\t"
        "vprefetch1\t512(%%r10)\n\t"
        "vprefetch1\t512(%%r12)\n\t"
        "jmp\t1f\n\t"
        "4:\n\t"
        "vprefetch0\t(%[RES])\n\t"
        "vmovaps\t\t(%[RES]),\t%%zmm0\n\t"            // otherwise, load our initial state from sum..
        "vprefetchnta\t(%%r10)\n\t"
        "vprefetchnta\t(%%r12)\n\t"
        "1:\n\t"
        "cmp\t$3,\t%%r8\n\t"                          // Compare iterations to three.
        "jnae\t6f\n\t"                                // If there are not three iterations left, jump to label 6.
        "vmovaps\t\t(%%r10),\t%%zmm1\n\t"             // Load two vectors.
        "vmovaps\t\t(%%r12),\t%%zmm2\n\t"
        "sub\t$3,\t%%r8\n\t"                          // Decrement iterations.
        "vprefetchnta\t192(%%r10)\n\t"                // prefetch the next float32x16_t block (192 bytes ahead)
        "vprefetchnta\t192(%%r12)\n\t"
        "vmovaps\t\t64(%%r10),\t%%zmm3\n\t"           // Load two vectors.
        "vmovaps\t\t64(%%r12),\t%%zmm4\n\t"
        "vprefetch1\t320(%%r10)\n\t"                  // prefetch the block two blocks past the next float32x16_t block (320 bytes ahead)
        "vprefetch1\t320(%%r12)\n\t"
        "vmovaps\t\t128(%%r10),\t%%zmm5\n\t"          // Load two vectors.
        "vmovaps\t\t128(%%r12),\t%%zmm6\n\t"
        "vprefetch1\t576(%%r10)\n\t"
        "vprefetch1\t576(%%r12)\n\t"
        "vprefetch1\t704(%%r10)\n\t"
        "vprefetch1\t704(%%r12)\n\t"
        "add\t$192,\t%%r10\n\t"                       // Move to the next float32x16_t block (192 bytes ahead)
        "add\t$192,\t%%r12\n\t"
        "vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t"   // Perform a fused multiply add
        "vfmadd231ps\t%%zmm3,\t%%zmm4,\t%%zmm0\n\t"   // Perform a fused multiply add
        "vfmadd231ps\t%%zmm5,\t%%zmm6,\t%%zmm0\n\t"   // Perform a fused multiply add
        "jmp\t1b\n\t"                                 // Jump back to the start of the loop
        "6:\n\t"                                      // we know we are near the tail. handle 2, 1, and 0 cases.
        "cmp\t$0,\t%%r8\n\t"                          // Compare iterations to zero
        "je\t2f\n\t"                                  // Jump to label 2 if zero (end of loop)
        "cmp\t$1,\t%%r8\n\t"                          // Compare iterations to one
        "vmovaps\t\t(%%r10),\t%%zmm1\n\t"             // Load two vectors.
        "vmovaps\t\t(%%r12),\t%%zmm2\n\t"
        "vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t"   // Perform a fused multiply add
        "je\t2f\n\t"                                  // Jump to label 2 if one (end of loop)
        // No compare. we must be two.
        "vmovaps\t\t64(%%r10),\t%%zmm3\n\t"           // Load two vectors.
        "vmovaps\t\t64(%%r12),\t%%zmm4\n\t"
        "vfmadd231ps\t%%zmm3,\t%%zmm4,\t%%zmm0\n\t"   // Perform a fused multiply add
        "2:\n\t"                                      // Label for loop end
        "vmovnraps\t\t%%zmm0,\t(%[RES])\n\t"          // save our results.
        : [RES]  "+r" (sumvec)
        : [ITER] "r"  (iterations),
          [VEC1] "r"  (mvec1),
          [VEC2] "r"  (mvec2),
          [CLR]  "r"  (clear),
          [Z]    "m"  (zero)
        : "zmm0", "zmm1", "zmm2", "zmm3", "zmm4", "zmm5", "zmm6", "cc", "memory", "r8", "r10", "r12");
}
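
/* An illustrative plain-C model of the routine above (hypothetical helper,
   an editorial sketch only, not part of this patch). `iterations` counts
   16-float chunks of mvec1 and mvec2. */
static void f32x16_fma_model(const float *x, const float *y, float sum[16], size_t iterations, int clear)
{
    if (clear == 1)
        for (int l = 0; l < 16; ++l) sum[l] = 0.0f;
    for (size_t i = 0; i < iterations; ++i)
        for (int l = 0; l < 16; ++l)
            sum[l] += x[i*16 + l] * y[i*16 + l];
}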

// NOTE: x and y inputs must be __attribute__((aligned(64)));
void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc)
{
    // our sum.
    float32x16_t sum __attribute__((aligned(64)));

    // the number of vector-sized steps we will need to do.
    const uint32_t np = (n & ~(GGML_F32_EPR - 1));

    GGML_F32x16_VEC_FMA((const float32x16_t *)x, (const float32x16_t *)y, &sum, np/GGML_F32_EPR, 1);

    // FIXME: replace this with a final round using masked vectors.
    if (n - np != 0)
    {
        // add the leftovers that could not be handled by the vector loop.
        // our extended last part of x.
        float32x16_t v1 __attribute__((aligned(64)));
        GGML_F32x16_VEC_ZERO(&v1);
        // our extended last part of y.
        float32x16_t v2 __attribute__((aligned(64)));
        GGML_F32x16_VEC_ZERO(&v2);

        memcpy(&v1, &x[np], (n - np)*sizeof(float));
        memcpy(&v2, &y[np], (n - np)*sizeof(float));

        GGML_F32x16_VEC_FMA(&v1, &v2, &sum, 1, 0);
    }

    // reduce sum, and store it in s.
    float sumf = 0.0f;
    for (uint32_t i = 0; i < GGML_F32_EPR; ++i)
        sumf += ((float *)&sum)[i];
    *s = sumf;
}
ggml-phi-knc.h (14 lines, new file)

@@ -0,0 +1,14 @@
#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

/* A forward declaration, to keep GCC happy. */
void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);

#ifdef __cplusplus
}
#endif
ggml-quants.c

@@ -4,6 +4,7 @@
 #include "ggml-quants.h"
 #include "ggml-impl.h"

+// FIXME: why do we import this twice?
 #define GGML_COMMON_IMPL_C
 #include "ggml-common.h"

@@ -49,6 +50,11 @@
 #include <riscv_vector.h>
 #endif

+// hand assembled replacement functions are cool.
+#if defined(__k1om__)
+#include <ggml-phi-knc-dot_q5_K_q8_K.h>
+#endif
+
 #undef MIN
 #undef MAX

@@ -7094,6 +7100,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 }
 #endif

+#if defined(__k1om__)
+/* We get this from elsewhere. */
+#else
 #if QK_K == 256
 void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
     assert(n % QK_K == 0);
@@ -7518,7 +7527,7 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 #endif
 }

-#else
+#else /* QK_K != 256 */

 void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
     assert(n % QK_K == 0);
@@ -7787,8 +7796,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
     *s = sumf;
 #endif
 }
-#endif
+#endif /* end QK_K != 256 */
+
+#endif /* defined(__k1om__) */

 #if QK_K == 256
 void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
ggml.c (13 changes)

@@ -41,6 +41,11 @@
 #pragma warning(disable: 4996)
 #endif

+// hand assembled replacement functions are cool.
+#if defined(__k1om__)
+#include <ggml-phi-knc.h>
+#endif
+
 #if defined(_WIN32)

 #include <windows.h>
@@ -448,7 +453,11 @@ int64_t ggml_cycles_per_ms(void) {

 static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

+#if defined(__k1om__)
+// We get this function from elsewhere.
+#else
 static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
+#endif
 static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc);

 static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
@@ -1330,6 +1339,9 @@ inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x)
 inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
 inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }

+#if defined(__k1om__)
+// we get this function from elsewhere.
+#else
 static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc) {
     assert(nrc == 1);
     UNUSED(nrc);
@@ -1372,6 +1384,7 @@ static void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float *

     *s = sumf;
 }
+#endif

 static void ggml_vec_dot_f16(int n, float * restrict s, size_t bs, ggml_fp16_t * restrict x, size_t bx, ggml_fp16_t * restrict y, size_t by, int nrc) {
     assert(nrc == 1);