formatting changes.
This commit is contained in:
parent 96fdd214c8
commit 6f67ea886f
1 changed file with 36 additions and 39 deletions
@@ -6,14 +6,11 @@
 // For memcpy.
 #include <string.h>

-// This SIMD unit can work with 32 float32s at once.
-#define GGML_F32_STEP 32
 // We can fit 16 of these float32s in a single vector register.
 #define GGML_F32_EPR 16

-// a single vector. 128*32=512
-typedef float float32x16_t __attribute__((vector_size (128)));
-#define GGML_F32x16 float32x16_t
+// A vector of 16 floats.
+typedef float float32x16_t __attribute__((vector_size (64), aligned (64)));

 // A forward declaration, to keep GCC happy...
 void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc);
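For reference (not part of the commit): with GCC's vector_size extension the attribute argument is a byte count, so the new typedef describes a 64-byte, 16-lane float vector aligned to a cache line, while the old vector_size (128) declared a 32-lane, 128-byte type. A minimal compile-time check, assuming a GCC-compatible compiler:

    typedef float float32x16_t __attribute__((vector_size (64), aligned (64)));

    /* 16 lanes * sizeof(float) = 64 bytes, on a 64-byte boundary. */
    _Static_assert(sizeof(float32x16_t) == 64, "float32x16_t should be 64 bytes");
    _Static_assert(_Alignof(float32x16_t) == 64, "float32x16_t should be 64-byte aligned");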
@@ -23,7 +20,7 @@ inline static void GGML_F32x16_VEC_ZERO(float32x16_t *target)
 uint8_t zero[4] __attribute__((aligned(64))) = {0,0,0,0};

 __asm__ __volatile__ (
 "vbroadcastf32x4\t%[Z]%{uint8%},\t%%zmm8\n\t" // use an upscaling operator to clear our value.
 "vmovnraps\t\t%%zmm8,\t%[RES]\n\t"
 : [RES] "+m" (*target)
 : [Z] "m" (zero)
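Stripped of the assembly, the helper above simply clears all 16 lanes of the target vector. A reference-only sketch (the name f32x16_zero_ref is mine, not the commit's), relying on the <string.h> already included in this file:

    static inline void f32x16_zero_ref(float32x16_t *target)
    {
        /* Same observable effect as the broadcast-zero + store above: every lane becomes 0.0f. */
        memset(target, 0, sizeof *target);
    }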
@@ -36,10 +33,10 @@ inline static void GGML_F32x16_VEC_FMA(const float32x16_t *mvec1, const float32x
 uint8_t zero[4] __attribute__((aligned(64))) = {0,0,0,0};

 __asm__ __volatile__ (
 "mov\t%[ITER],%%r8\n\t" // how many register sized chunks are we responsible for
 "mov\t%[VEC1],%%r10\n\t" // where do we start work in mvec1?
 "mov\t%[VEC2],%%r12\n\t" // where do we start work in mvec2?
 "cmp\t$1,%[CLR]\n\t" // should we clear the sum before we start?
 "jne\t4f\n\t"
 "vbroadcastf32x4\t%[Z]%{uint8%},\t%%zmm0\n\t" // if so, use an upscaling operator to do it.
 "vprefetchnta\t(%%r10)\n\t"
@@ -55,47 +52,47 @@ inline static void GGML_F32x16_VEC_FMA(const float32x16_t *mvec1, const float32x
 "jmp\t1f\n\t"
 "4:\n\t"
 "vprefetch0\t(%[RES])\n\t"
 "vmovaps\t\t(%[RES]),\t%%zmm0\n\t" // otherwise, load our inital state from sum..
 "vprefetchnta\t(%%r10)\n\t"
 "vprefetchnta\t(%%r12)\n\t"
 "1:\n\t"
 "cmp\t$3,\t%%r8\n\t" // Compare iterations to three.
 "jnae\t6f\n\t" // If there are not three iterations left, jump to label 6.
 "vmovaps\t\t(%%r10),\t%%zmm1\n\t" // Load two vectors.
 "vmovaps\t\t(%%r12),\t%%zmm2\n\t"
 "sub\t$3,\t%%r8\n\t" // Decrement iterations
 "vprefetchnta\t192(%%r10)\n\t" // prefetch the next float32x16_t block (192 bytes ahead)
 "vprefetchnta\t192(%%r12)\n\t"
 "vmovaps\t\t64(%%r10),\t%%zmm3\n\t" // Load two vectors.
 "vmovaps\t\t64(%%r12),\t%%zmm4\n\t"
 "vprefetch1\t320(%%r10)\n\t" // prefetch the block after the block after the next float32x16_t block (320 bytes ahead)
 "vprefetch1\t320(%%r12)\n\t"
 "vmovaps\t\t128(%%r10),\t%%zmm5\n\t" // Load two vectors.
 "vmovaps\t\t128(%%r12),\t%%zmm6\n\t"
 "vprefetch1\t576(%%r10)\n\t"
 "vprefetch1\t576(%%r12)\n\t"
 "vprefetch1\t704(%%r10)\n\t"
 "vprefetch1\t704(%%r12)\n\t"
 "add\t$192,\t%%r10\n\t" // Move to the next float32x16_t block (192 bytes ahead)
 "add\t$192,\t%%r12\n\t"
 "vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t" // Perform a fused multiply add
 "vfmadd231ps\t%%zmm3,\t%%zmm4,\t%%zmm0\n\t" // Perform a fused multiply add
 "vfmadd231ps\t%%zmm5,\t%%zmm6,\t%%zmm0\n\t" // Perform a fused multiply add
 "jmp\t1b\n\t" // Jump back to the start of the loop
 "6:\n\t" // we know we are near the tail. handle 2, 1, and 0 cases.
 "cmp\t$0,\t%%r8\n\t" // Compare iterations to zero
 "je\t2f\n\t" // Jump to label 2 if zero (end of loop)
 "cmp\t$1,\t%%r8\n\t" // Compare iterations to one
 "vmovaps\t\t(%%r10),\t%%zmm1\n\t" // Load two vectors.
 "vmovaps\t\t(%%r12),\t%%zmm2\n\t"
 "vfmadd231ps\t%%zmm1,\t%%zmm2,\t%%zmm0\n\t" // Perform a fused multiply add
 "je\t2f\n\t" // Jump to label 3 if one (end of loop)
 // No compare. we must be two.
 "vmovaps\t\t64(%%r10),\t%%zmm3\n\t" // Load two vectors.
 "vmovaps\t\t64(%%r12),\t%%zmm4\n\t"
 "vfmadd231ps\t%%zmm3,\t%%zmm4,\t%%zmm0\n\t" // Perform a fused multiply add
 "2:\n\t" // Label for loop end
 "vmovnraps\t\t%%zmm0,\t(%[RES])\n\t" // save our results.
 : [RES] "+r" (sumvec)
 : [ITER] "r" (iterations),
 [VEC1] "r" (mvec1),
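Taken together, the assembly above keeps a running 16-lane multiply-accumulate over register-sized chunks of mvec1 and mvec2, three chunks per main-loop pass with a 2/1/0-chunk tail, optionally clearing the accumulator first. A reference-only C sketch of the arithmetic (the helper name and the exact types of the iteration count and clear flag are my assumptions; the routine's full signature is truncated in the hunk header above):

    static inline void f32x16_vec_fma_ref(const float32x16_t *mvec1,
                                          const float32x16_t *mvec2,
                                          float32x16_t *sumvec,
                                          size_t iterations, int clear)
    {
        if (clear) {
            memset(sumvec, 0, sizeof *sumvec);        /* mirrors the vbroadcast-zero path */
        }
        for (size_t i = 0; i < iterations; ++i) {     /* one 16-float chunk per step */
            for (int lane = 0; lane < 16; ++lane) {
                (*sumvec)[lane] += mvec1[i][lane] * mvec2[i][lane];
            }
        }
    }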
@@ -109,7 +106,7 @@ inline static void GGML_F32x16_VEC_FMA(const float32x16_t *mvec1, const float32x
 void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restrict x, size_t bx, const float * restrict y, size_t by, int nrc)
 {
 // our sum.
-float32x16_t sum __attribute__((aligned(64)));
+float32x16_t sum;

 // the number of vector-sized steps we will need to do.
 const uint32_t np = (n & ~(GGML_F32_EPR - 1));
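Masking with ~(GGML_F32_EPR - 1) rounds n down to a multiple of 16, so np is the portion the vector loop covers and n - np (at most 15 elements) is left for the zero-padded tail in the next hunk. A worked example of the arithmetic, not from the commit:

    /* GGML_F32_EPR == 16, so ~(GGML_F32_EPR - 1) == ~0xF clears the low four bits. */
    /* n = 100  ->  np = 100 & ~0xF = 96, leaving a 4-element tail.                 */
    const uint32_t np = (n & ~(GGML_F32_EPR - 1));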
@@ -121,10 +118,10 @@ void ggml_vec_dot_f32(int n, float * restrict s, size_t bs, const float * restri
 {
 // add the leftovers, that could not be handled by the vector loop.
 // our extended last part of x.
-float32x16_t v1 __attribute__((aligned(64)));
+float32x16_t v1;
 GGML_F32x16_VEC_ZERO(&v1);
 // our extended last part of y.
-float32x16_t v2 __attribute__((aligned(64)));
+float32x16_t v2;
 GGML_F32x16_VEC_ZERO(&v2);

 memcpy(&v1, &x[np], (n - np)*sizeof(float));
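The hunk ends at the copy of x's leftovers; the likely continuation (not visible in this diff) mirrors it for y and feeds the two zero-padded vectors through one more 16-wide multiply-accumulate, so the padding lanes contribute nothing to the dot product. A sketch of that pattern, written as plain lane-wise C rather than a call to the real FMA helper (whose argument order is not shown here):

    memcpy(&v2, &y[np], (n - np)*sizeof(float));   /* assumed: same zero-padded copy for y */

    for (int lane = 0; lane < 16; ++lane) {
        sum[lane] += v1[lane] * v2[lane];          /* reference arithmetic only; zero lanes add 0 */
    }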