linux-stable/arch/sparc/include/asm/xor_32.h
Ard Biesheuvel 297565aa22 lib/xor: make xor prototypes more friendly to compiler vectorization
Modern compilers are perfectly capable of extracting parallelism from
the XOR routines, provided that the prototypes reflect the nature of the
input accurately, in particular, the fact that the input vectors are
expected not to overlap. This is not documented explicitly, but is
implied by the interchangeability of the various C routines, some of
which use temporary variables while others don't: this means that these
routines only behave identically for non-overlapping inputs.

So let's decorate these input vectors with the __restrict modifier,
which informs the compiler that there is no overlap. While at it, make
the input-only vectors pointer-to-const as well.

Tested-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Link: https://github.com/ClangBuiltLinux/linux/issues/563
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2022-02-11 20:39:39 +11:00

268 lines
7.1 KiB
C

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* include/asm/xor.h
*
* Optimized RAID-5 checksumming functions for 32-bit Sparc.
*/
/*
* High speed xor_block operation for RAID4/5 utilizing the
* ldd/std SPARC instructions.
*
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
/*
 * sparc_2 - XOR the contents of p2 into p1 (p1[i] ^= p2[i]).
 *
 * @bytes: length of each buffer in bytes; must be a non-zero multiple of
 *         32 (8 longs) — the do/while body always runs at least once and
 *         consumes 32 bytes per iteration.
 * @p1:    destination buffer, read-modify-written in place.
 * @p2:    source buffer; __restrict guarantees it does not overlap p1.
 *
 * Each iteration loads 32 bytes from p1 and p2 with 64-bit ldd, XORs
 * them in register pairs, and stores the result back to p1 with std.
 * The ldd/std pairs implicitly use the odd registers (%g3, %g5, %o1,
 * %o3, %o5, %l1, %l3, %l5), which is why those appear in the xor
 * instructions and the clobber list despite not being named in the
 * loads/stores.
 *
 * NOTE(review): the asm writes through %0 but carries no "memory"
 * clobber; it relies on __volatile__ plus the pointer increments for
 * ordering — confirm against upstream before touching.
 */
static void
sparc_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2)
{
/* Number of 32-byte (8-long) chunks to process. */
int lines = bytes / (sizeof (long)) / 8;
do {
__asm__ __volatile__(
"ldd [%0 + 0x00], %%g2\n\t"
"ldd [%0 + 0x08], %%g4\n\t"
"ldd [%0 + 0x10], %%o0\n\t"
"ldd [%0 + 0x18], %%o2\n\t"
"ldd [%1 + 0x00], %%o4\n\t"
"ldd [%1 + 0x08], %%l0\n\t"
"ldd [%1 + 0x10], %%l2\n\t"
"ldd [%1 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"std %%g2, [%0 + 0x00]\n\t"
"std %%g4, [%0 + 0x08]\n\t"
"std %%o0, [%0 + 0x10]\n\t"
"std %%o2, [%0 + 0x18]\n"
:
: "r" (p1), "r" (p2)
: "g2", "g3", "g4", "g5",
"o0", "o1", "o2", "o3", "o4", "o5",
"l0", "l1", "l2", "l3", "l4", "l5");
/* Advance both pointers by the 8 longs just processed. */
p1 += 8;
p2 += 8;
} while (--lines > 0);
}
/*
 * sparc_3 - XOR the contents of p2 and p3 into p1
 *           (p1[i] ^= p2[i] ^ p3[i]).
 *
 * @bytes: length of each buffer in bytes; must be a non-zero multiple
 *         of 32 — the do/while body always runs at least once.
 * @p1:    destination buffer, read-modify-written in place.
 * @p2:    first source buffer (non-overlapping per __restrict).
 * @p3:    second source buffer (non-overlapping per __restrict).
 *
 * Same 32-bytes-per-iteration ldd/xor/std scheme as sparc_2, with the
 * loads from p3 interleaved between the p2 XORs so the second round of
 * XORs can reuse the %o4/%l0/%l2/%l4 register pairs once the first
 * round has consumed them. The odd registers in the xors (%g3, %g5,
 * %o1, %o3, %o5, %l1, %l3, %l5) are the implicit second halves of the
 * 64-bit ldd/std register pairs.
 */
static void
sparc_3(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3)
{
/* Number of 32-byte (8-long) chunks to process. */
int lines = bytes / (sizeof (long)) / 8;
do {
__asm__ __volatile__(
"ldd [%0 + 0x00], %%g2\n\t"
"ldd [%0 + 0x08], %%g4\n\t"
"ldd [%0 + 0x10], %%o0\n\t"
"ldd [%0 + 0x18], %%o2\n\t"
"ldd [%1 + 0x00], %%o4\n\t"
"ldd [%1 + 0x08], %%l0\n\t"
"ldd [%1 + 0x10], %%l2\n\t"
"ldd [%1 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"ldd [%2 + 0x00], %%o4\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"ldd [%2 + 0x08], %%l0\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"ldd [%2 + 0x10], %%l2\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"ldd [%2 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"std %%g2, [%0 + 0x00]\n\t"
"std %%g4, [%0 + 0x08]\n\t"
"std %%o0, [%0 + 0x10]\n\t"
"std %%o2, [%0 + 0x18]\n"
:
: "r" (p1), "r" (p2), "r" (p3)
: "g2", "g3", "g4", "g5",
"o0", "o1", "o2", "o3", "o4", "o5",
"l0", "l1", "l2", "l3", "l4", "l5");
/* Advance all pointers by the 8 longs just processed. */
p1 += 8;
p2 += 8;
p3 += 8;
} while (--lines > 0);
}
/*
 * sparc_4 - XOR the contents of p2, p3 and p4 into p1
 *           (p1[i] ^= p2[i] ^ p3[i] ^ p4[i]).
 *
 * @bytes: length of each buffer in bytes; must be a non-zero multiple
 *         of 32 — the do/while body always runs at least once.
 * @p1:    destination buffer, read-modify-written in place.
 * @p2..@p4: source buffers (non-overlapping per __restrict).
 *
 * Extends the sparc_3 pattern by one more round: each source's loads
 * are interleaved with the previous source's XORs, recycling the
 * %o4/%l0/%l2/%l4 register pairs between rounds. The odd registers in
 * the xors are the implicit second halves of the 64-bit ldd/std pairs.
 */
static void
sparc_4(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4)
{
/* Number of 32-byte (8-long) chunks to process. */
int lines = bytes / (sizeof (long)) / 8;
do {
__asm__ __volatile__(
"ldd [%0 + 0x00], %%g2\n\t"
"ldd [%0 + 0x08], %%g4\n\t"
"ldd [%0 + 0x10], %%o0\n\t"
"ldd [%0 + 0x18], %%o2\n\t"
"ldd [%1 + 0x00], %%o4\n\t"
"ldd [%1 + 0x08], %%l0\n\t"
"ldd [%1 + 0x10], %%l2\n\t"
"ldd [%1 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"ldd [%2 + 0x00], %%o4\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"ldd [%2 + 0x08], %%l0\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"ldd [%2 + 0x10], %%l2\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"ldd [%2 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"ldd [%3 + 0x00], %%o4\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"ldd [%3 + 0x08], %%l0\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"ldd [%3 + 0x10], %%l2\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"ldd [%3 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"std %%g2, [%0 + 0x00]\n\t"
"std %%g4, [%0 + 0x08]\n\t"
"std %%o0, [%0 + 0x10]\n\t"
"std %%o2, [%0 + 0x18]\n"
:
: "r" (p1), "r" (p2), "r" (p3), "r" (p4)
: "g2", "g3", "g4", "g5",
"o0", "o1", "o2", "o3", "o4", "o5",
"l0", "l1", "l2", "l3", "l4", "l5");
/* Advance all pointers by the 8 longs just processed. */
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
} while (--lines > 0);
}
/*
 * sparc_5 - XOR the contents of p2, p3, p4 and p5 into p1
 *           (p1[i] ^= p2[i] ^ p3[i] ^ p4[i] ^ p5[i]).
 *
 * @bytes: length of each buffer in bytes; must be a non-zero multiple
 *         of 32 — the do/while body always runs at least once.
 * @p1:    destination buffer, read-modify-written in place.
 * @p2..@p5: source buffers (non-overlapping per __restrict).
 *
 * Same interleaved ldd/xor/std pipeline as sparc_3/sparc_4, extended
 * to four source rounds. Each new source's loads overlap the previous
 * round's XORs, recycling the %o4/%l0/%l2/%l4 register pairs; the odd
 * registers in the xors are the implicit second halves of the 64-bit
 * ldd/std register pairs.
 */
static void
sparc_5(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2,
const unsigned long * __restrict p3,
const unsigned long * __restrict p4,
const unsigned long * __restrict p5)
{
/* Number of 32-byte (8-long) chunks to process. */
int lines = bytes / (sizeof (long)) / 8;
do {
__asm__ __volatile__(
"ldd [%0 + 0x00], %%g2\n\t"
"ldd [%0 + 0x08], %%g4\n\t"
"ldd [%0 + 0x10], %%o0\n\t"
"ldd [%0 + 0x18], %%o2\n\t"
"ldd [%1 + 0x00], %%o4\n\t"
"ldd [%1 + 0x08], %%l0\n\t"
"ldd [%1 + 0x10], %%l2\n\t"
"ldd [%1 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"ldd [%2 + 0x00], %%o4\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"ldd [%2 + 0x08], %%l0\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"ldd [%2 + 0x10], %%l2\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"ldd [%2 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"ldd [%3 + 0x00], %%o4\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"ldd [%3 + 0x08], %%l0\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"ldd [%3 + 0x10], %%l2\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"ldd [%3 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"ldd [%4 + 0x00], %%o4\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"ldd [%4 + 0x08], %%l0\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"ldd [%4 + 0x10], %%l2\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"ldd [%4 + 0x18], %%l4\n\t"
"xor %%g2, %%o4, %%g2\n\t"
"xor %%g3, %%o5, %%g3\n\t"
"xor %%g4, %%l0, %%g4\n\t"
"xor %%g5, %%l1, %%g5\n\t"
"xor %%o0, %%l2, %%o0\n\t"
"xor %%o1, %%l3, %%o1\n\t"
"xor %%o2, %%l4, %%o2\n\t"
"xor %%o3, %%l5, %%o3\n\t"
"std %%g2, [%0 + 0x00]\n\t"
"std %%g4, [%0 + 0x08]\n\t"
"std %%o0, [%0 + 0x10]\n\t"
"std %%o2, [%0 + 0x18]\n"
:
: "r" (p1), "r" (p2), "r" (p3), "r" (p4), "r" (p5)
: "g2", "g3", "g4", "g5",
"o0", "o1", "o2", "o3", "o4", "o5",
"l0", "l1", "l2", "l3", "l4", "l5");
/* Advance all pointers by the 8 longs just processed. */
p1 += 8;
p2 += 8;
p3 += 8;
p4 += 8;
p5 += 8;
} while (--lines > 0);
}
/*
 * Template registering the ldd/std-based routines above with the
 * generic xor_blocks framework (struct xor_block_template is declared
 * elsewhere); benchmarked against the generic templates below.
 */
static struct xor_block_template xor_block_SPARC = {
.name = "SPARC",
.do_2 = sparc_2,
.do_3 = sparc_3,
.do_4 = sparc_4,
.do_5 = sparc_5,
};
/* For grins, also test the generic routines. */
#include <asm-generic/xor.h>
/*
 * Override the generic XOR_TRY_TEMPLATES (from <asm-generic/xor.h>,
 * included above) so the benchmark tries the generic 8regs/32regs
 * routines as well as the SPARC ldd/std template; xor_speed() picks
 * the fastest at boot.
 */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES \
do { \
xor_speed(&xor_block_8regs); \
xor_speed(&xor_block_32regs); \
xor_speed(&xor_block_SPARC); \
} while (0)