linux-stable/arch/mips/crypto/crc32-mips.c

// SPDX-License-Identifier: GPL-2.0
/*
* crc32-mips.c - CRC32 and CRC32C using optional MIPSr6 instructions
*
* Module based on arm64/crypto/crc32-arm.c
*
* Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
* Copyright (C) 2018 MIPS Tech, LLC
*/
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mipsregs.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
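
/*
 * The two enums mirror the immediate fields encoded into the MIPSr6
 * crc32/crc32c instructions below: the operand width (byte, halfword,
 * word, doubleword) and the polynomial selector (CRC32 vs. CRC32C).
 */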
enum crc_op_size {
b, h, w, d,
};
enum crc_type {
crc32,
crc32c,
};
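
/*
 * If the assembler does not understand the CRC extension, hand-encode each
 * crc32* instruction as a raw opcode word through an assembler macro (and
 * purge the macro again afterwards); otherwise simply enable the
 * instructions with ".set crc".
 */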
#ifndef TOOLCHAIN_SUPPORTS_CRC
#define _ASM_SET_CRC(OP, SZ, TYPE) \
_ASM_MACRO_3R(OP, rt, rs, rt2, \
".ifnc \\rt, \\rt2\n\t" \
".error \"invalid operands \\\"" #OP " \\rt,\\rs,\\rt2\\\"\"\n\t" \
".endif\n\t" \
_ASM_INSN_IF_MIPS(0x7c00000f | (__rt << 16) | (__rs << 21) | \
((SZ) << 6) | ((TYPE) << 8)) \
_ASM_INSN32_IF_MM(0x00000030 | (__rs << 16) | (__rt << 21) | \
((SZ) << 14) | ((TYPE) << 3)))
#define _ASM_UNSET_CRC(op, SZ, TYPE) ".purgem " #op "\n\t"
#else /* TOOLCHAIN_SUPPORTS_CRC */
#define _ASM_SET_CRC(op, SZ, TYPE) ".set\tcrc\n\t"
#define _ASM_UNSET_CRC(op, SZ, TYPE)
#endif
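
/*
 * Issue a single crc32/crc32c instruction: 'crc' is both the accumulator
 * input and the result, 'value' supplies the next chunk of input data.
 */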
#define __CRC32(crc, value, op, SZ, TYPE) \
do { \
__asm__ __volatile__( \
".set push\n\t" \
_ASM_SET_CRC(op, SZ, TYPE) \
#op " %0, %1, %0\n\t" \
_ASM_UNSET_CRC(op, SZ, TYPE) \
".set pop" \
: "+r" (crc) \
: "r" (value)); \
} while (0)
#define _CRC32_crc32b(crc, value) __CRC32(crc, value, crc32b, 0, 0)
#define _CRC32_crc32h(crc, value) __CRC32(crc, value, crc32h, 1, 0)
#define _CRC32_crc32w(crc, value) __CRC32(crc, value, crc32w, 2, 0)
#define _CRC32_crc32d(crc, value) __CRC32(crc, value, crc32d, 3, 0)
#define _CRC32_crc32cb(crc, value) __CRC32(crc, value, crc32cb, 0, 1)
#define _CRC32_crc32ch(crc, value) __CRC32(crc, value, crc32ch, 1, 1)
#define _CRC32_crc32cw(crc, value) __CRC32(crc, value, crc32cw, 2, 1)
#define _CRC32_crc32cd(crc, value) __CRC32(crc, value, crc32cd, 3, 1)
#define _CRC32(crc, value, size, op) \
_CRC32_##op##size(crc, value)
#define CRC32(crc, value, size) \
_CRC32(crc, value, size, crc32)
#define CRC32C(crc, value, size) \
_CRC32(crc, value, size, crc32c)
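
/*
 * Hardware-accelerated little-endian CRC32/CRC32C over a buffer: consume
 * 8-byte chunks on 64-bit kernels (4-byte chunks on 32-bit), then mop up
 * the remaining 4/2/1-byte tail. The two helpers below differ only in the
 * polynomial selected by the instructions they issue.
 */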
static u32 crc32_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
{
u32 crc = crc_;
#ifdef CONFIG_64BIT
while (len >= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
CRC32(crc, value, d);
p += sizeof(u64);
len -= sizeof(u64);
}
if (len & sizeof(u32)) {
#else /* !CONFIG_64BIT */
while (len >= sizeof(u32)) {
#endif
u32 value = get_unaligned_le32(p);
CRC32(crc, value, w);
p += sizeof(u32);
len -= sizeof(u32);
}
if (len & sizeof(u16)) {
u16 value = get_unaligned_le16(p);
CRC32(crc, value, h);
p += sizeof(u16);
}
if (len & sizeof(u8)) {
u8 value = *p++;
CRC32(crc, value, b);
}
return crc;
}
static u32 crc32c_mips_le_hw(u32 crc_, const u8 *p, unsigned int len)
{
u32 crc = crc_;
#ifdef CONFIG_64BIT
while (len >= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
CRC32C(crc, value, d);
p += sizeof(u64);
len -= sizeof(u64);
}
if (len & sizeof(u32)) {
#else /* !CONFIG_64BIT */
while (len >= sizeof(u32)) {
#endif
u32 value = get_unaligned_le32(p);
CRC32C(crc, value, w);
p += sizeof(u32);
len -= sizeof(u32);
}
if (len & sizeof(u16)) {
u16 value = get_unaligned_le16(p);
CRC32C(crc, value, h);
p += sizeof(u16);
}
if (len & sizeof(u8)) {
u8 value = *p++;
CRC32C(crc, value, b);
}
return crc;
}
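
/*
 * Glue code exposing the helpers above through the shash interface: a CRC
 * is a byte-at-a-time checksum (block size 1) producing a 32-bit digest.
 */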
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
struct chksum_ctx {
u32 key;
};
struct chksum_desc_ctx {
u32 crc;
};
static int chksum_init(struct shash_desc *desc)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = mctx->key;
return 0;
}
/*
 * Setting the seed allows arbitrary accumulators and flexible XOR policy.
 * If your algorithm starts with ~0, then XOR with ~0 before you set
 * the seed.
 */
static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
if (keylen != sizeof(mctx->key))
return -EINVAL;
mctx->key = get_unaligned_le32(key);
return 0;
}
static int chksum_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = crc32_mips_le_hw(ctx->crc, data, length);
return 0;
}
static int chksumc_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = crc32c_mips_le_hw(ctx->crc, data, length);
return 0;
}
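
/*
 * On final/finup the crc32c variants emit the bitwise complement of the
 * accumulator, while the crc32 variants emit it unchanged, matching the
 * output conventions of the respective algorithms.
 */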
static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
put_unaligned_le32(ctx->crc, out);
return 0;
}
static int chksumc_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
put_unaligned_le32(~ctx->crc, out);
return 0;
}
static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
put_unaligned_le32(crc32_mips_le_hw(crc, data, len), out);
return 0;
}
static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
{
put_unaligned_le32(~crc32c_mips_le_hw(crc, data, len), out);
return 0;
}
static int chksum_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(ctx->crc, data, len, out);
}
static int chksumc_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksumc_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
return __chksum_finup(mctx->key, data, length, out);
}
static int chksumc_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
return __chksumc_finup(mctx->key, data, length, out);
}
static int chksum_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = ~0;
return 0;
}
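
/*
 * Both transforms register at priority 300 so they are preferred over the
 * generic table-driven implementations. CRYPTO_ALG_OPTIONAL_KEY lets a
 * caller supply a custom seed via setkey while defaulting to ~0 from
 * chksum_cra_init().
 */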
static struct shash_alg crc32_alg = {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksum_update,
.final = chksum_final,
.finup = chksum_finup,
.digest = chksum_digest,
.descsize = sizeof(struct chksum_desc_ctx),
.base = {
.cra_name = "crc32",
.cra_driver_name = "crc32-mips-hw",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_init = chksum_cra_init,
}
};
static struct shash_alg crc32c_alg = {
.digestsize = CHKSUM_DIGEST_SIZE,
.setkey = chksum_setkey,
.init = chksum_init,
.update = chksumc_update,
.final = chksumc_final,
.finup = chksumc_finup,
.digest = chksumc_digest,
.descsize = sizeof(struct chksum_desc_ctx),
.base = {
.cra_name = "crc32c",
.cra_driver_name = "crc32c-mips-hw",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_init = chksum_cra_init,
}
};
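
/*
 * Register both hashes, unwinding the first on failure. The module is only
 * bound when the CPU advertises the CRC feature, via
 * module_cpu_feature_match(MIPS_CRC32, ...) below.
 */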
static int __init crc32_mod_init(void)
{
int err;
err = crypto_register_shash(&crc32_alg);
if (err)
return err;
err = crypto_register_shash(&crc32c_alg);
if (err) {
crypto_unregister_shash(&crc32_alg);
return err;
}
return 0;
}
static void __exit crc32_mod_exit(void)
{
crypto_unregister_shash(&crc32_alg);
crypto_unregister_shash(&crc32c_alg);
}
MODULE_AUTHOR("Marcin Nowakowski <marcin.nowakowski@mips.com>");
MODULE_DESCRIPTION("CRC32 and CRC32C using optional MIPS instructions");
MODULE_LICENSE("GPL v2");
module_cpu_feature_match(MIPS_CRC32, crc32_mod_init);
module_exit(crc32_mod_exit);
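
/*
 * Illustrative sketch (not part of this driver): a kernel consumer would
 * reach these transforms through the generic shash API rather than calling
 * the helpers above directly. "buf" and "len" are placeholders for the
 * caller's data; on success "out" holds the little-endian CRC32C digest.
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("crc32c", 0, 0);
 *	u8 out[4];
 *	int err;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	{
 *		SHASH_DESC_ON_STACK(desc, tfm);
 *
 *		desc->tfm = tfm;
 *		err = crypto_shash_digest(desc, buf, len, out);
 *	}
 *
 *	crypto_free_shash(tfm);
 *	return err;
 */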