clang format

This commit is contained in:
lihan 2024-11-28 17:16:08 +08:00
parent 6a6c954ddb
commit 1e645678e7
4 changed files with 190 additions and 223 deletions

View file

@@ -1,14 +1,10 @@
#include "ssm_conv.cuh"
template <int block_size>
static __global__ void ssm_conv_f32(const float *__restrict__ src0,
const float *__restrict__ src1,
const int src0_nb0, const int src0_nb1,
const int src0_nb2, const int src1_nb1,
float *__restrict__ dst, const int dst_nb0,
const int dst_nb1, const int dst_nb2,
const int nc, const int ncs, const int nr,
const int n_t, const int n_s) {
static __global__ void ssm_conv_f32(const float * __restrict__ src0, const float * __restrict__ src1,
const int src0_nb0, const int src0_nb1, const int src0_nb2, const int src1_nb1,
float * __restrict__ dst, const int dst_nb0, const int dst_nb1, const int dst_nb2,
const int nc, const int ncs, const int nr, const int n_t, const int n_s) {
const int tid = blockIdx.y;
const int i3 = blockIdx.x;
const int i2 = threadIdx.x;
@@ -26,13 +22,10 @@ static __global__ void ssm_conv_f32(const float *__restrict__ src0,
// {d_conv - 1 + n_t, d_inner, n_seqs}
// sliding window
const float *s =
(const float *)((const char *)src0 + ir0 * src0_nb1 + i2 * src0_nb0 +
const float * s = (const float *) ((const char *) src0 + ir0 * src0_nb1 + i2 * src0_nb0 +
i3 * src0_nb2); // {d_conv, d_inner, n_s}
const float *c = (const float *)((const char *)src1 +
ir0 * src1_nb1); // {d_conv, d_inner}
float *x = (float *)((char *)dst + ir0 * dst_nb0 + i2 * dst_nb1 +
i3 * dst_nb2); // {d_inner, n_t, n_s}
const float * c = (const float *) ((const char *) src1 + ir0 * src1_nb1); // {d_conv, d_inner}
float * x = (float *) ((char *) dst + ir0 * dst_nb0 + i2 * dst_nb1 + i3 * dst_nb2); // {d_inner, n_t, n_s}
// TODO: transpose the output for smaller strides for big batches?
// d_inner
@@ -50,20 +43,16 @@ static __global__ void ssm_conv_f32(const float *__restrict__ src0,
}
}
static void ssm_conv_f32_cuda(const float *src0, const float *src1,
const int src0_nb0, const int src0_nb1,
const int src0_nb2, const int src1_nb1,
float *dst, const int dst_nb0, const int dst_nb1,
const int dst_nb2, const int nc, const int ncs,
const int nr, const int n_t, const int n_s,
cudaStream_t stream) {
static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int src0_nb0, const int src0_nb1,
const int src0_nb2, const int src1_nb1, float * dst, const int dst_nb0, const int dst_nb1,
const int dst_nb2, const int nc, const int ncs, const int nr, const int n_t,
const int n_s, cudaStream_t stream) {
const dim3 block_dims(n_t, 1, 1);
// const int nblocks = n_s; // TODO
const dim3 grid_dims(n_s, WARP_SIZE, 1);
ssm_conv_f32<WARP_SIZE><<<grid_dims, block_dims, 0, stream>>>(
src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1,
dst_nb2, nc, ncs, nr, n_t, n_s);
src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1, dst_nb2, nc, ncs, nr, n_t, n_s);
}
void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -88,7 +77,6 @@ void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context &ctx, ggml_tensor *dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT(dst->type == GGML_TYPE_F32);
ssm_conv_f32_cuda(src0_d, src1_d, src0->nb[0], src0->nb[1], src0->nb[2],
src1->nb[1], dst_d, dst->nb[0], dst->nb[1], dst->nb[2], nc,
ncs, nr, n_t, n_s, stream);
ssm_conv_f32_cuda(src0_d, src1_d, src0->nb[0], src0->nb[1], src0->nb[2], src1->nb[1], dst_d, dst->nb[0], dst->nb[1],
dst->nb[2], nc, ncs, nr, n_t, n_s, stream);
}

View file

@@ -7,16 +7,12 @@
template <size_t splitD, size_t N>
__global__ void __launch_bounds__(splitD, 2)
ssm_scan_f32(const float *__restrict__ src0, const float *__restrict__ src1,
const float *__restrict__ src2, const float *__restrict__ src3,
const float *__restrict__ src4, const float *__restrict__ src5,
const int src0_nb1, const int src0_nb2, const int src1_nb0,
const int src1_nb1, const int src1_nb2, const int src1_nb3,
const int src2_nb0, const int src2_nb1, const int src2_nb2,
const int src3_nb1, const int src4_nb1, const int src4_nb2,
const int src5_nb1, const int src5_nb2,
float *__restrict__ dst, const int D, const int L,
const int B) {
ssm_scan_f32(const float * __restrict__ src0, const float * __restrict__ src1, const float * __restrict__ src2,
const float * __restrict__ src3, const float * __restrict__ src4, const float * __restrict__ src5,
const int src0_nb1, const int src0_nb2, const int src1_nb0, const int src1_nb1, const int src1_nb2,
const int src1_nb3, const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1,
const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2,
float * __restrict__ dst, const int D, const int L, const int B) {
const int bidx = blockIdx.x; // split along B
const int bidy = blockIdx.y; // split along D
const int tid = threadIdx.x;
@@ -29,20 +25,14 @@ __global__ void __launch_bounds__(splitD, 2)
float * smem_A = smem;
float * smem_s0 = smem_A + splitD * stride_sA;
const float *s0_block = (const float *)((char *)src0 + bidx * src0_nb2 +
bidy * splitD * src0_nb1);
const float *x_block = (const float *)((char *)src1 + (bidx * src1_nb2) +
bidy * splitD * sizeof(float));
const float *dt_block = (const float *)((char *)src2 + (bidx * src2_nb2) +
bidy * splitD * sizeof(float));
const float *A_block =
(const float *)((char *)src3 + bidy * splitD * src3_nb1);
const float * s0_block = (const float *) ((char *) src0 + bidx * src0_nb2 + bidy * splitD * src0_nb1);
const float * x_block = (const float *) ((char *) src1 + (bidx * src1_nb2) + bidy * splitD * sizeof(float));
const float * dt_block = (const float *) ((char *) src2 + (bidx * src2_nb2) + bidy * splitD * sizeof(float));
const float * A_block = (const float *) ((char *) src3 + bidy * splitD * src3_nb1);
const float * B_block = (const float *) ((char *) src4 + (bidx * src4_nb2));
const float * C_block = (const float *) ((char *) src5 + (bidx * src5_nb2));
float *y_block = (float *)((char *)dst + (bidx * src1_nb2) +
bidy * splitD * sizeof(float));
float *s_block = (float *)((char *)dst + src1_nb3 + bidx * src0_nb2 +
bidy * splitD * src0_nb1);
float * y_block = (float *) ((char *) dst + (bidx * src1_nb2) + bidy * splitD * sizeof(float));
float * s_block = (float *) ((char *) dst + src1_nb3 + bidx * src0_nb2 + bidy * splitD * src0_nb1);
const int stride_s0 = src0_nb1 / sizeof(float);
const int stride_x = src1_nb1 / sizeof(float);
@@ -61,14 +51,12 @@ __global__ void __launch_bounds__(splitD, 2)
// todo: bank conflict
// I am always confused with how to use the swizzling method to solve
// bank conflit. Hoping somebody can tell me.
smem_A[(wid * warpSize + i) * stride_sA + wtid +
((wtid / 16) > 0 ? 1 : 0)] = value;
smem_A[(wid * warpSize + i) * stride_sA + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value;
}
#pragma unroll
for (int i = 0; i < splitD / 4; i += 2) {
float value = s0_block[(wid * warpSize + i) * stride_s0 + wtid];
smem_s0[(wid * warpSize + i) * stride_ss0 + wtid +
((wtid / 16) > 0 ? 1 : 0)] = value;
smem_s0[(wid * warpSize + i) * stride_ss0 + wtid + ((wtid / 16) > 0 ? 1 : 0)] = value;
}
}
@@ -84,8 +72,7 @@ __global__ void __launch_bounds__(splitD, 2)
#pragma unroll
for (int j = 0; j < N; j++) {
float state = (smem_s0[(wid * warpSize + wtid) * stride_ss0 + j] *
expf(dt_soft_plus *
smem_A[(wid * warpSize + wtid) * stride_sA + j])) +
expf(dt_soft_plus * smem_A[(wid * warpSize + wtid) * stride_sA + j])) +
(B_block[i * stride_B + j] * x_dt);
sumf += state * C_block[i * stride_C + j];
if (i == L - 1) {
@@ -99,15 +86,12 @@ __global__ void __launch_bounds__(splitD, 2)
}
}
static void ssm_scan_f32_cuda(
const float *src0, const float *src1, const float *src2, const float *src3,
const float *src4, const float *src5, const int src0_nb1,
const int src0_nb2, const int src1_nb0, const int src1_nb1,
const int src1_nb2, const int src1_nb3, const int src2_nb0,
const int src2_nb1, const int src2_nb2, const int src3_nb1,
const int src4_nb1, const int src4_nb2, const int src5_nb1,
const int src5_nb2, float *dst, const int N, const int D, const int L,
const int B, cudaStream_t stream) {
static void ssm_scan_f32_cuda(const float * src0, const float * src1, const float * src2, const float * src3,
const float * src4, const float * src5, const int src0_nb1, const int src0_nb2,
const int src1_nb0, const int src1_nb1, const int src1_nb2, const int src1_nb3,
const int src2_nb0, const int src2_nb1, const int src2_nb2, const int src3_nb1,
const int src4_nb1, const int src4_nb2, const int src5_nb1, const int src5_nb2,
float * dst, const int N, const int D, const int L, const int B, cudaStream_t stream) {
const int threads = 128;
// todo: consider D cannot be divided,does this situation exist?
GGML_ASSERT(D % threads == 0);
@@ -115,9 +99,8 @@ static void ssm_scan_f32_cuda(
const int smem_size = (threads * (N + 1) * 2) * sizeof(float);
if (N == 16) {
ssm_scan_f32<128, 16><<<blocks, threads, smem_size, stream>>>(
src0, src1, src2, src3, src4, src5, src0_nb1, src0_nb2, src1_nb0,
src1_nb1, src1_nb2, src1_nb3, src2_nb0, src2_nb1, src2_nb2, src3_nb1,
src4_nb1, src4_nb2, src5_nb1, src5_nb2, dst, D, L, B);
src0, src1, src2, src3, src4, src5, src0_nb1, src0_nb2, src1_nb0, src1_nb1, src1_nb2, src1_nb3, src2_nb0,
src2_nb1, src2_nb2, src3_nb1, src4_nb1, src4_nb2, src5_nb1, src5_nb2, dst, D, L, B);
} else {
GGML_ABORT("doesn't support N!=16.");
}
@@ -141,8 +124,7 @@ void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context &ctx, ggml_tensor *dst) {
const int64_t n_t = src1->ne[1]; // number of tokens per sequence
const int64_t n_s = src0->ne[2]; // number of sequences in the batch
GGML_ASSERT(ggml_nelements(src1) + ggml_nelements(src0) ==
ggml_nelements(dst));
GGML_ASSERT(ggml_nelements(src1) + ggml_nelements(src0) == ggml_nelements(dst));
GGML_ASSERT(src0->nb[0] == sizeof(float));
GGML_ASSERT(src1->nb[0] == sizeof(float));
GGML_ASSERT(src2->nb[0] == sizeof(float));
@@ -154,8 +136,7 @@ void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context &ctx, ggml_tensor *dst) {
// required for per-sequence offsets for states
GGML_ASSERT(src0->nb[2] == src0->ne[0] * src0->ne[1] * sizeof(float));
// required to get correct offset for state destination (i.e. src1->nb[3])
GGML_ASSERT(src1->nb[3] ==
src1->ne[0] * src1->ne[1] * src1->ne[2] * sizeof(float));
GGML_ASSERT(src1->nb[3] == src1->ne[0] * src1->ne[1] * src1->ne[2] * sizeof(float));
const float * src0_d = (const float *) src0->data;
const float * src1_d = (const float *) src1->data;
@@ -169,9 +150,7 @@ void ggml_cuda_op_ssm_scan(ggml_backend_cuda_context &ctx, ggml_tensor *dst) {
GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT(dst->type == GGML_TYPE_F32);
ssm_scan_f32_cuda(src0_d, src1_d, src2_d, src3_d, src4_d, src5_d, src0->nb[1],
src0->nb[2], src1->nb[0], src1->nb[1], src1->nb[2],
src1->nb[3], src2->nb[0], src2->nb[1], src2->nb[2],
src3->nb[1], src4->nb[1], src4->nb[2], src5->nb[1],
src5->nb[2], dst_d, nc, nr, n_t, n_s, stream);
ssm_scan_f32_cuda(src0_d, src1_d, src2_d, src3_d, src4_d, src5_d, src0->nb[1], src0->nb[2], src1->nb[0],
src1->nb[1], src1->nb[2], src1->nb[3], src2->nb[0], src2->nb[1], src2->nb[2], src3->nb[1],
src4->nb[1], src4->nb[2], src5->nb[1], src5->nb[2], dst_d, nc, nr, n_t, n_s, stream);
}