This update includes the following changes:

API:
 
 - Test setkey in no-SIMD context.
 - Add skcipher speed test for user-specified algorithm.
 
 Algorithms:
 
 - Add x25519 support on ppc64le.
 - Add VAES and AVX512 / AVX10 optimized AES-GCM on x86.
 - Remove sm2 algorithm.
 
 Drivers:
 
 - Add Allwinner H616 support to sun8i-ce.
 - Use DMA in stm32.
 - Add Exynos850 hwrng support to exynos.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEn51F/lCuNhUwmDeSxycdCkmxi6cFAmaZFsgACgkQxycdCkmx
 i6f76Q//ej7akY9fo6/qsn8UFK16O0SCEMkx7TrkxqHV8R6uwy4ret3+b5dbckY6
 hBjDabiL/BAdNzo8hvta+BOtN6ToEqquSVwNCpX0U3YMLf9dIzcMA4Uri3LbxUHi
 x9Qa8klI5x62Kg+RW+ovaJC4C11oKTpjVeDn4S57MudlBnhEa3DYcEADKiUowkEz
 aigtLx8HrZYjwkQxwgWeS0xzeojhW1P20yaghOd6hTCD7vKw18JaKdD8r4YFGOBu
 39eDaM/0vR+wWokk3NNl6NmXieBT8qLFt+OIbQs6b3gX9K37daahRs1VoShcL+ix
 l8GaqLpo1n1llVrV1OWzyVLVLtYK849QEo6OmlusnbK7e5pQKEOXoACQ0VB8ElNE
 1u7KNW6CBWGzr33dWPgl9yYBrT3BmMXABIK4dNmTicJsK2zk2FPKbLDZNi8fWah/
 D46mv7Rb8EtTdhN56EzceUJpd1ZfmP9S4vY1Hu8YdmI1pxex11US/XppKLoyymqp
 vNOzf85VuZ/GkUPfHdyWAFBnTaCjXtSBrlXD6+0nxavU9KGli0PLLX5tKNNWGw0l
 51Z0tbNsDbo3Z+sMmtfvBXR2V8NwiAT5f775W0lLvpq/44mbDpdN3jGvfy9y9C7u
 1DUC6F0XtUhZjR7e6/EhvHh3lB/a3w/m3+XC+XzDeox/VYTrC3Q=
 =x80X
 -----END PGP SIGNATURE-----

Merge tag 'v6.11-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto update from Herbert Xu:
 "API:
   - Test setkey in no-SIMD context
   - Add skcipher speed test for user-specified algorithm

  Algorithms:
   - Add x25519 support on ppc64le
   - Add VAES and AVX512 / AVX10 optimized AES-GCM on x86
   - Remove sm2 algorithm

  Drivers:
   - Add Allwinner H616 support to sun8i-ce
   - Use DMA in stm32
   - Add Exynos850 hwrng support to exynos"

* tag 'v6.11-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (81 commits)
  hwrng: core - remove (un)register_miscdev()
  crypto: lib/mpi - delete unnecessary condition
  crypto: testmgr - generate power-of-2 lengths more often
  crypto: mxs-dcp - Ensure payload is zero when using key slot
  hwrng: Kconfig - Do not enable by default CN10K driver
  crypto: starfive - Fix nent assignment in rsa dec
  crypto: starfive - Align rsa input data to 32-bit
  crypto: qat - fix unintentional re-enabling of error interrupts
  crypto: qat - extend scope of lock in adf_cfg_add_key_value_param()
  Documentation: qat: fix auto_reset attribute details
  crypto: sun8i-ce - add Allwinner H616 support
  crypto: sun8i-ce - wrap accesses to descriptor address fields
  dt-bindings: crypto: sun8i-ce: Add compatible for H616
  hwrng: core - Fix wrong quality calculation at hw rng registration
  hwrng: exynos - Enable Exynos850 support
  hwrng: exynos - Add SMC based TRNG operation
  hwrng: exynos - Implement bus clock control
  hwrng: exynos - Use devm_clk_get_enabled() to get the clock
  hwrng: exynos - Improve coding style
  dt-bindings: rng: Add Exynos850 support to exynos-trng
  ...
Linus Torvalds 2024-07-19 08:52:58 -07:00
commit c434e25b62
114 changed files with 5480 additions and 5893 deletions

View file

@ -143,8 +143,8 @@ Description:
This attribute is only available for qat_4xxx devices.
What: /sys/bus/pci/devices/<BDF>/qat/auto_reset
Date: March 2024
KernelVersion: 6.8
Date: May 2024
KernelVersion: 6.9
Contact: qat-linux@intel.com
Description: (RW) Reports the current state of the autoreset feature
for a QAT device

View file

@ -18,6 +18,7 @@ properties:
- allwinner,sun50i-a64-crypto
- allwinner,sun50i-h5-crypto
- allwinner,sun50i-h6-crypto
- allwinner,sun50i-h616-crypto
reg:
maxItems: 1
@ -49,6 +50,7 @@ if:
compatible:
enum:
- allwinner,sun20i-d1-crypto
- allwinner,sun50i-h616-crypto
then:
properties:
clocks:

View file

@ -26,6 +26,9 @@ properties:
items:
- const: core
power-domains:
maxItems: 1
required:
- compatible
- reg

View file

@ -12,14 +12,17 @@ maintainers:
properties:
compatible:
const: samsung,exynos5250-trng
enum:
- samsung,exynos5250-trng
- samsung,exynos850-trng
clocks:
maxItems: 1
minItems: 1
maxItems: 2
clock-names:
items:
- const: secss
minItems: 1
maxItems: 2
reg:
maxItems: 1
@ -30,6 +33,35 @@ required:
- clock-names
- reg
allOf:
- if:
properties:
compatible:
contains:
const: samsung,exynos850-trng
then:
properties:
clocks:
items:
- description: SSS (Security Sub System) operating clock
- description: SSS (Security Sub System) bus clock
clock-names:
items:
- const: secss
- const: pclk
else:
properties:
clocks:
items:
- description: SSS (Security Sub System) operating clock
clock-names:
items:
- const: secss
additionalProperties: false
examples:

View file

@ -980,6 +980,12 @@ F: include/uapi/linux/psp-dbc.h
F: tools/crypto/ccp/*.c
F: tools/crypto/ccp/*.py
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - HSTI SUPPORT
M: Mario Limonciello <mario.limonciello@amd.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/ccp/hsti.*
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>

View file

@ -17,6 +17,7 @@
#include <linux/module.h>
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");

View file

@ -48,6 +48,7 @@
*/
#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
.text
@ -123,11 +124,12 @@
* uint crc32_pmull_le(unsigned char const *buffer,
* size_t len, uint crc32)
*/
ENTRY(crc32_pmull_le)
SYM_FUNC_START(crc32_pmull_le)
adr r3, .Lcrc32_constants
b 0f
SYM_FUNC_END(crc32_pmull_le)
ENTRY(crc32c_pmull_le)
SYM_FUNC_START(crc32c_pmull_le)
adr r3, .Lcrc32c_constants
0: bic LEN, LEN, #15
@ -236,8 +238,7 @@ fold_64:
vmov r0, s5
bx lr
ENDPROC(crc32_pmull_le)
ENDPROC(crc32c_pmull_le)
SYM_FUNC_END(crc32c_pmull_le)
.macro __crc32, c
subs ip, r2, #8
@ -296,11 +297,11 @@ ARM_BE8(rev16 r3, r3 )
.endm
.align 5
ENTRY(crc32_armv8_le)
SYM_TYPED_FUNC_START(crc32_armv8_le)
__crc32
ENDPROC(crc32_armv8_le)
SYM_FUNC_END(crc32_armv8_le)
.align 5
ENTRY(crc32c_armv8_le)
SYM_TYPED_FUNC_START(crc32c_armv8_le)
__crc32 c
ENDPROC(crc32c_armv8_le)
SYM_FUNC_END(crc32c_armv8_le)

View file

@ -241,6 +241,7 @@ module_init(crc32_pmull_mod_init);
module_exit(crc32_pmull_mod_exit);
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Accelerated CRC32(C) using ARM CRC, NEON and Crypto Extensions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("crc32");
MODULE_ALIAS_CRYPTO("crc32c");

View file

@ -84,5 +84,6 @@ module_init(crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_exit);
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("crct10dif");

View file

@ -133,4 +133,5 @@ module_exit(arm_curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-neon");
MODULE_DESCRIPTION("Public key crypto: Curve25519 (NEON-accelerated)");
MODULE_LICENSE("GPL v2");

View file

@ -267,6 +267,7 @@ static void __exit arm_poly1305_mod_exit(void)
module_init(arm_poly1305_mod_init);
module_exit(arm_poly1305_mod_exit);
MODULE_DESCRIPTION("Accelerated Poly1305 transform for ARM");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-arm");

View file

@ -16,6 +16,7 @@
#include <linux/module.h>
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Bit sliced AES using NEON instructions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ecb(aes)");

View file

@ -98,7 +98,7 @@ static struct shash_alg crc_t10dif_alg[] = {{
.base.cra_name = "crct10dif",
.base.cra_driver_name = "crct10dif-arm64-neon",
.base.cra_priority = 100,
.base.cra_priority = 150,
.base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.base.cra_module = THIS_MODULE,
}, {
@ -138,6 +138,7 @@ module_cpu_feature_match(ASIMD, crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_exit);
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("CRC-T10DIF using arm64 NEON and Crypto Extensions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("crct10dif");
MODULE_ALIAS_CRYPTO("crct10dif-arm64-ce");

View file

@ -226,6 +226,7 @@ static void __exit neon_poly1305_mod_exit(void)
module_init(neon_poly1305_mod_init);
module_exit(neon_poly1305_mod_exit);
MODULE_DESCRIPTION("Poly1305 transform using NEON instructions");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-neon");

View file

@ -2,6 +2,17 @@
menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"
config CRYPTO_CURVE25519_PPC64
tristate "Public key crypto: Curve25519 (PowerPC64)"
depends on PPC64 && CPU_LITTLE_ENDIAN
select CRYPTO_LIB_CURVE25519_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CURVE25519
help
Curve25519 algorithm
Architecture: PowerPC64
- Little-endian
config CRYPTO_CRC32C_VPMSUM
tristate "CRC32c"
depends on PPC64 && ALTIVEC

View file

@ -17,6 +17,7 @@ obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o
obj-$(CONFIG_CRYPTO_CURVE25519_PPC64) += curve25519-ppc64le.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
@ -29,6 +30,7 @@ aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-p
chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o
curve25519-ppc64le-y := curve25519-ppc64le-core.o curve25519-ppc64le_asm.o
ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
override flavour := linux-ppc64le

View file

@ -0,0 +1,299 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2024- IBM Corp.
*
* X25519 scalar multiplication with 51 bits limbs for PPC64le.
* Based on RFC7748 and AArch64 optimized implementation for X25519
* - Algorithm 1 Scalar multiplication of a variable point
*/
#include <crypto/curve25519.h>
#include <crypto/internal/kpp.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/cpufeature.h>
#include <linux/processor.h>
typedef uint64_t fe51[5];
asmlinkage void x25519_fe51_mul(fe51 h, const fe51 f, const fe51 g);
asmlinkage void x25519_fe51_sqr(fe51 h, const fe51 f);
asmlinkage void x25519_fe51_mul121666(fe51 h, fe51 f);
asmlinkage void x25519_fe51_sqr_times(fe51 h, const fe51 f, int n);
asmlinkage void x25519_fe51_frombytes(fe51 h, const uint8_t *s);
asmlinkage void x25519_fe51_tobytes(uint8_t *s, const fe51 h);
asmlinkage void x25519_cswap(fe51 p, fe51 q, unsigned int bit);
#define fmul x25519_fe51_mul
#define fsqr x25519_fe51_sqr
#define fmul121666 x25519_fe51_mul121666
#define fe51_tobytes x25519_fe51_tobytes
static void fadd(fe51 h, const fe51 f, const fe51 g)
{
h[0] = f[0] + g[0];
h[1] = f[1] + g[1];
h[2] = f[2] + g[2];
h[3] = f[3] + g[3];
h[4] = f[4] + g[4];
}
/*
* Prime = 2 ** 255 - 19, 255 bits
* (0x7fffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffff ffffffed)
*
* Prime in 5 51-bit limbs
*/
static fe51 prime51 = { 0x7ffffffffffed, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff, 0x7ffffffffffff};
static void fsub(fe51 h, const fe51 f, const fe51 g)
{
h[0] = (f[0] + ((prime51[0] * 2))) - g[0];
h[1] = (f[1] + ((prime51[1] * 2))) - g[1];
h[2] = (f[2] + ((prime51[2] * 2))) - g[2];
h[3] = (f[3] + ((prime51[3] * 2))) - g[3];
h[4] = (f[4] + ((prime51[4] * 2))) - g[4];
}
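/*
 * Aside, not part of the patch: the five 51-bit limbs of prime51 above
 * reassemble to (2^51 - 19) + (2^51 - 1)*2^51 + (2^51 - 1)*2^102 +
 * (2^51 - 1)*2^153 + (2^51 - 1)*2^204 = 2^255 - 19, i.e. the Curve25519
 * prime.  fsub() adds 2*p limb-wise before subtracting so that no limb
 * underflows: for reduced inputs (limbs of roughly 51 bits) each g[i]
 * stays below 2*prime51[i], and adding a multiple of p does not change
 * the value mod p.
 */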
static void fe51_frombytes(fe51 h, const uint8_t *s)
{
/*
* Make sure 64-bit aligned.
*/
unsigned char sbuf[32+8];
unsigned char *sb = PTR_ALIGN((void *)sbuf, 8);
memcpy(sb, s, 32);
x25519_fe51_frombytes(h, sb);
}
static void finv(fe51 o, const fe51 i)
{
fe51 a0, b, c, t00;
fsqr(a0, i);
x25519_fe51_sqr_times(t00, a0, 2);
fmul(b, t00, i);
fmul(a0, b, a0);
fsqr(t00, a0);
fmul(b, t00, b);
x25519_fe51_sqr_times(t00, b, 5);
fmul(b, t00, b);
x25519_fe51_sqr_times(t00, b, 10);
fmul(c, t00, b);
x25519_fe51_sqr_times(t00, c, 20);
fmul(t00, t00, c);
x25519_fe51_sqr_times(t00, t00, 10);
fmul(b, t00, b);
x25519_fe51_sqr_times(t00, b, 50);
fmul(c, t00, b);
x25519_fe51_sqr_times(t00, c, 100);
fmul(t00, t00, c);
x25519_fe51_sqr_times(t00, t00, 50);
fmul(t00, t00, b);
x25519_fe51_sqr_times(t00, t00, 5);
fmul(o, t00, a0);
}
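/*
 * Aside, not part of the patch: finv() is Fermat inversion.  Following
 * the chain above, b reaches i^(2^5 - 1), the 5/10/20/10/50/100/50
 * squaring runs build up to i^(2^250 - 1), and the final five squarings
 * plus the multiply by a0 = i^11 give o = i^(2^255 - 32 + 11)
 * = i^(2^255 - 21) = i^(p - 2) = i^(-1) mod p.
 */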
static void curve25519_fe51(uint8_t out[32], const uint8_t scalar[32],
const uint8_t point[32])
{
fe51 x1, x2, z2, x3, z3;
uint8_t s[32];
unsigned int swap = 0;
int i;
memcpy(s, scalar, 32);
s[0] &= 0xf8;
s[31] &= 0x7f;
s[31] |= 0x40;
fe51_frombytes(x1, point);
z2[0] = z2[1] = z2[2] = z2[3] = z2[4] = 0;
x3[0] = x1[0];
x3[1] = x1[1];
x3[2] = x1[2];
x3[3] = x1[3];
x3[4] = x1[4];
x2[0] = z3[0] = 1;
x2[1] = z3[1] = 0;
x2[2] = z3[2] = 0;
x2[3] = z3[3] = 0;
x2[4] = z3[4] = 0;
for (i = 254; i >= 0; --i) {
unsigned int k_t = 1 & (s[i / 8] >> (i & 7));
fe51 a, b, c, d, e;
fe51 da, cb, aa, bb;
fe51 dacb_p, dacb_m;
swap ^= k_t;
x25519_cswap(x2, x3, swap);
x25519_cswap(z2, z3, swap);
swap = k_t;
fsub(b, x2, z2); // B = x_2 - z_2
fadd(a, x2, z2); // A = x_2 + z_2
fsub(d, x3, z3); // D = x_3 - z_3
fadd(c, x3, z3); // C = x_3 + z_3
fsqr(bb, b); // BB = B^2
fsqr(aa, a); // AA = A^2
fmul(da, d, a); // DA = D * A
fmul(cb, c, b); // CB = C * B
fsub(e, aa, bb); // E = AA - BB
fmul(x2, aa, bb); // x2 = AA * BB
fadd(dacb_p, da, cb); // DA + CB
fsub(dacb_m, da, cb); // DA - CB
fmul121666(z3, e); // 121666 * E
fsqr(z2, dacb_m); // (DA - CB)^2
fsqr(x3, dacb_p); // x3 = (DA + CB)^2
fadd(b, bb, z3); // BB + 121666 * E
fmul(z3, x1, z2); // z3 = x1 * (DA - CB)^2
fmul(z2, e, b); // z2 = e * (BB + (DA + CB)^2)
}
finv(z2, z2);
fmul(x2, x2, z2);
fe51_tobytes(out, x2);
}
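/*
 * Aside, not part of the patch: the loop above is the X25519 Montgomery
 * ladder of RFC 7748, walking bits 254..0 with a conditional-swap pair
 * per bit, and the clamping of s is the standard X25519 clamp:
 * s[0] &= 0xf8 clears the three low bits (a multiple of the cofactor 8),
 * s[31] &= 0x7f clears bit 255 and s[31] |= 0x40 sets bit 254.
 */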
void curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE],
const u8 basepoint[CURVE25519_KEY_SIZE])
{
curve25519_fe51(mypublic, secret, basepoint);
}
EXPORT_SYMBOL(curve25519_arch);
void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
{
curve25519_fe51(pub, secret, curve25519_base_point);
}
EXPORT_SYMBOL(curve25519_base_arch);
static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
u8 *secret = kpp_tfm_ctx(tfm);
if (!len)
curve25519_generate_secret(secret);
else if (len == CURVE25519_KEY_SIZE &&
crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
memcpy(secret, buf, CURVE25519_KEY_SIZE);
else
return -EINVAL;
return 0;
}
static int curve25519_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
const u8 *secret = kpp_tfm_ctx(tfm);
u8 buf[CURVE25519_KEY_SIZE];
int copied, nbytes;
if (req->src)
return -EINVAL;
curve25519_base_arch(buf, secret);
/* might want less than we've got */
nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
nbytes),
buf, nbytes);
if (copied != nbytes)
return -EINVAL;
return 0;
}
static int curve25519_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
const u8 *secret = kpp_tfm_ctx(tfm);
u8 public_key[CURVE25519_KEY_SIZE];
u8 buf[CURVE25519_KEY_SIZE];
int copied, nbytes;
if (!req->src)
return -EINVAL;
copied = sg_copy_to_buffer(req->src,
sg_nents_for_len(req->src,
CURVE25519_KEY_SIZE),
public_key, CURVE25519_KEY_SIZE);
if (copied != CURVE25519_KEY_SIZE)
return -EINVAL;
curve25519_arch(buf, secret, public_key);
/* might want less than we've got */
nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
nbytes),
buf, nbytes);
if (copied != nbytes)
return -EINVAL;
return 0;
}
static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
{
return CURVE25519_KEY_SIZE;
}
static struct kpp_alg curve25519_alg = {
.base.cra_name = "curve25519",
.base.cra_driver_name = "curve25519-ppc64le",
.base.cra_priority = 200,
.base.cra_module = THIS_MODULE,
.base.cra_ctxsize = CURVE25519_KEY_SIZE,
.set_secret = curve25519_set_secret,
.generate_public_key = curve25519_generate_public_key,
.compute_shared_secret = curve25519_compute_shared_secret,
.max_size = curve25519_max_size,
};
static int __init curve25519_mod_init(void)
{
return IS_REACHABLE(CONFIG_CRYPTO_KPP) ?
crypto_register_kpp(&curve25519_alg) : 0;
}
static void __exit curve25519_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_KPP))
crypto_unregister_kpp(&curve25519_alg);
}
module_init(curve25519_mod_init);
module_exit(curve25519_mod_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-ppc64le");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Danny Tsen <dtsen@us.ibm.com>");

View file

@ -0,0 +1,671 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#
# This code is taken from CRYPTOGAMs[1] and is included here using the option
# in the license to distribute the code under the GPL. Therefore this program
# is free software; you can redistribute it and/or modify it under the terms of
# the GNU General Public License version 2 as published by the Free Software
# Foundation.
#
# [1] https://github.com/dot-asm/cryptogams/
# Copyright (c) 2006-2017, CRYPTOGAMS by <appro@openssl.org>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain copyright notices,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# * Neither the name of the CRYPTOGAMS nor the names of its
# copyright holder and contributors may be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# ALTERNATIVELY, provided that this notice is retained in full, this
# product may be distributed under the terms of the GNU General Public
# License (GPL), in which case the provisions of the GPL apply INSTEAD OF
# those given above.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see https://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# ====================================================================
# Written and Modified by Danny Tsen <dtsen@us.ibm.com>
# - Added x25519_fe51_sqr_times, x25519_fe51_frombytes, x25519_fe51_tobytes
# and x25519_cswap
#
# Copyright 2024- IBM Corp.
#
# X25519 lower-level primitives for PPC64.
#
#include <linux/linkage.h>
.text
.align 5
SYM_FUNC_START(x25519_fe51_mul)
stdu 1,-144(1)
std 21,56(1)
std 22,64(1)
std 23,72(1)
std 24,80(1)
std 25,88(1)
std 26,96(1)
std 27,104(1)
std 28,112(1)
std 29,120(1)
std 30,128(1)
std 31,136(1)
ld 6,0(5)
ld 7,0(4)
ld 8,8(4)
ld 9,16(4)
ld 10,24(4)
ld 11,32(4)
mulld 22,7,6
mulhdu 23,7,6
mulld 24,8,6
mulhdu 25,8,6
mulld 30,11,6
mulhdu 31,11,6
ld 4,8(5)
mulli 11,11,19
mulld 26,9,6
mulhdu 27,9,6
mulld 28,10,6
mulhdu 29,10,6
mulld 12,11,4
mulhdu 21,11,4
addc 22,22,12
adde 23,23,21
mulld 12,7,4
mulhdu 21,7,4
addc 24,24,12
adde 25,25,21
mulld 12,10,4
mulhdu 21,10,4
ld 6,16(5)
mulli 10,10,19
addc 30,30,12
adde 31,31,21
mulld 12,8,4
mulhdu 21,8,4
addc 26,26,12
adde 27,27,21
mulld 12,9,4
mulhdu 21,9,4
addc 28,28,12
adde 29,29,21
mulld 12,10,6
mulhdu 21,10,6
addc 22,22,12
adde 23,23,21
mulld 12,11,6
mulhdu 21,11,6
addc 24,24,12
adde 25,25,21
mulld 12,9,6
mulhdu 21,9,6
ld 4,24(5)
mulli 9,9,19
addc 30,30,12
adde 31,31,21
mulld 12,7,6
mulhdu 21,7,6
addc 26,26,12
adde 27,27,21
mulld 12,8,6
mulhdu 21,8,6
addc 28,28,12
adde 29,29,21
mulld 12,9,4
mulhdu 21,9,4
addc 22,22,12
adde 23,23,21
mulld 12,10,4
mulhdu 21,10,4
addc 24,24,12
adde 25,25,21
mulld 12,8,4
mulhdu 21,8,4
ld 6,32(5)
mulli 8,8,19
addc 30,30,12
adde 31,31,21
mulld 12,11,4
mulhdu 21,11,4
addc 26,26,12
adde 27,27,21
mulld 12,7,4
mulhdu 21,7,4
addc 28,28,12
adde 29,29,21
mulld 12,8,6
mulhdu 21,8,6
addc 22,22,12
adde 23,23,21
mulld 12,9,6
mulhdu 21,9,6
addc 24,24,12
adde 25,25,21
mulld 12,10,6
mulhdu 21,10,6
addc 26,26,12
adde 27,27,21
mulld 12,11,6
mulhdu 21,11,6
addc 28,28,12
adde 29,29,21
mulld 12,7,6
mulhdu 21,7,6
addc 30,30,12
adde 31,31,21
.Lfe51_reduce:
li 0,-1
srdi 0,0,13
srdi 12,26,51
and 9,26,0
insrdi 12,27,51,0
srdi 21,22,51
and 7,22,0
insrdi 21,23,51,0
addc 28,28,12
addze 29,29
addc 24,24,21
addze 25,25
srdi 12,28,51
and 10,28,0
insrdi 12,29,51,0
srdi 21,24,51
and 8,24,0
insrdi 21,25,51,0
addc 30,30,12
addze 31,31
add 9,9,21
srdi 12,30,51
and 11,30,0
insrdi 12,31,51,0
mulli 12,12,19
add 7,7,12
srdi 21,9,51
and 9,9,0
add 10,10,21
srdi 12,7,51
and 7,7,0
add 8,8,12
std 9,16(3)
std 10,24(3)
std 11,32(3)
std 7,0(3)
std 8,8(3)
ld 21,56(1)
ld 22,64(1)
ld 23,72(1)
ld 24,80(1)
ld 25,88(1)
ld 26,96(1)
ld 27,104(1)
ld 28,112(1)
ld 29,120(1)
ld 30,128(1)
ld 31,136(1)
addi 1,1,144
blr
SYM_FUNC_END(x25519_fe51_mul)
.align 5
SYM_FUNC_START(x25519_fe51_sqr)
stdu 1,-144(1)
std 21,56(1)
std 22,64(1)
std 23,72(1)
std 24,80(1)
std 25,88(1)
std 26,96(1)
std 27,104(1)
std 28,112(1)
std 29,120(1)
std 30,128(1)
std 31,136(1)
ld 7,0(4)
ld 8,8(4)
ld 9,16(4)
ld 10,24(4)
ld 11,32(4)
add 6,7,7
mulli 21,11,19
mulld 22,7,7
mulhdu 23,7,7
mulld 24,8,6
mulhdu 25,8,6
mulld 26,9,6
mulhdu 27,9,6
mulld 28,10,6
mulhdu 29,10,6
mulld 30,11,6
mulhdu 31,11,6
add 6,8,8
mulld 12,11,21
mulhdu 11,11,21
addc 28,28,12
adde 29,29,11
mulli 5,10,19
mulld 12,8,8
mulhdu 11,8,8
addc 26,26,12
adde 27,27,11
mulld 12,9,6
mulhdu 11,9,6
addc 28,28,12
adde 29,29,11
mulld 12,10,6
mulhdu 11,10,6
addc 30,30,12
adde 31,31,11
mulld 12,21,6
mulhdu 11,21,6
add 6,10,10
addc 22,22,12
adde 23,23,11
mulld 12,10,5
mulhdu 10,10,5
addc 24,24,12
adde 25,25,10
mulld 12,6,21
mulhdu 10,6,21
add 6,9,9
addc 26,26,12
adde 27,27,10
mulld 12,9,9
mulhdu 10,9,9
addc 30,30,12
adde 31,31,10
mulld 12,5,6
mulhdu 10,5,6
addc 22,22,12
adde 23,23,10
mulld 12,21,6
mulhdu 10,21,6
addc 24,24,12
adde 25,25,10
b .Lfe51_reduce
SYM_FUNC_END(x25519_fe51_sqr)
.align 5
SYM_FUNC_START(x25519_fe51_mul121666)
stdu 1,-144(1)
std 21,56(1)
std 22,64(1)
std 23,72(1)
std 24,80(1)
std 25,88(1)
std 26,96(1)
std 27,104(1)
std 28,112(1)
std 29,120(1)
std 30,128(1)
std 31,136(1)
lis 6,1
ori 6,6,56130
ld 7,0(4)
ld 8,8(4)
ld 9,16(4)
ld 10,24(4)
ld 11,32(4)
mulld 22,7,6
mulhdu 23,7,6
mulld 24,8,6
mulhdu 25,8,6
mulld 26,9,6
mulhdu 27,9,6
mulld 28,10,6
mulhdu 29,10,6
mulld 30,11,6
mulhdu 31,11,6
b .Lfe51_reduce
SYM_FUNC_END(x25519_fe51_mul121666)
.align 5
SYM_FUNC_START(x25519_fe51_sqr_times)
stdu 1,-144(1)
std 21,56(1)
std 22,64(1)
std 23,72(1)
std 24,80(1)
std 25,88(1)
std 26,96(1)
std 27,104(1)
std 28,112(1)
std 29,120(1)
std 30,128(1)
std 31,136(1)
ld 7,0(4)
ld 8,8(4)
ld 9,16(4)
ld 10,24(4)
ld 11,32(4)
mtctr 5
.Lsqr_times_loop:
add 6,7,7
mulli 21,11,19
mulld 22,7,7
mulhdu 23,7,7
mulld 24,8,6
mulhdu 25,8,6
mulld 26,9,6
mulhdu 27,9,6
mulld 28,10,6
mulhdu 29,10,6
mulld 30,11,6
mulhdu 31,11,6
add 6,8,8
mulld 12,11,21
mulhdu 11,11,21
addc 28,28,12
adde 29,29,11
mulli 5,10,19
mulld 12,8,8
mulhdu 11,8,8
addc 26,26,12
adde 27,27,11
mulld 12,9,6
mulhdu 11,9,6
addc 28,28,12
adde 29,29,11
mulld 12,10,6
mulhdu 11,10,6
addc 30,30,12
adde 31,31,11
mulld 12,21,6
mulhdu 11,21,6
add 6,10,10
addc 22,22,12
adde 23,23,11
mulld 12,10,5
mulhdu 10,10,5
addc 24,24,12
adde 25,25,10
mulld 12,6,21
mulhdu 10,6,21
add 6,9,9
addc 26,26,12
adde 27,27,10
mulld 12,9,9
mulhdu 10,9,9
addc 30,30,12
adde 31,31,10
mulld 12,5,6
mulhdu 10,5,6
addc 22,22,12
adde 23,23,10
mulld 12,21,6
mulhdu 10,21,6
addc 24,24,12
adde 25,25,10
# fe51_reduce
li 0,-1
srdi 0,0,13
srdi 12,26,51
and 9,26,0
insrdi 12,27,51,0
srdi 21,22,51
and 7,22,0
insrdi 21,23,51,0
addc 28,28,12
addze 29,29
addc 24,24,21
addze 25,25
srdi 12,28,51
and 10,28,0
insrdi 12,29,51,0
srdi 21,24,51
and 8,24,0
insrdi 21,25,51,0
addc 30,30,12
addze 31,31
add 9,9,21
srdi 12,30,51
and 11,30,0
insrdi 12,31,51,0
mulli 12,12,19
add 7,7,12
srdi 21,9,51
and 9,9,0
add 10,10,21
srdi 12,7,51
and 7,7,0
add 8,8,12
bdnz .Lsqr_times_loop
std 9,16(3)
std 10,24(3)
std 11,32(3)
std 7,0(3)
std 8,8(3)
ld 21,56(1)
ld 22,64(1)
ld 23,72(1)
ld 24,80(1)
ld 25,88(1)
ld 26,96(1)
ld 27,104(1)
ld 28,112(1)
ld 29,120(1)
ld 30,128(1)
ld 31,136(1)
addi 1,1,144
blr
SYM_FUNC_END(x25519_fe51_sqr_times)
.align 5
SYM_FUNC_START(x25519_fe51_frombytes)
li 12, -1
srdi 12, 12, 13 # 0x7ffffffffffff
ld 5, 0(4)
ld 6, 8(4)
ld 7, 16(4)
ld 8, 24(4)
srdi 10, 5, 51
and 5, 5, 12 # h0
sldi 11, 6, 13
or 11, 10, 11 # h1t
srdi 10, 6, 38
and 6, 11, 12 # h1
sldi 11, 7, 26
or 10, 10, 11 # h2t
srdi 11, 7, 25
and 7, 10, 12 # h2
sldi 10, 8, 39
or 11, 11, 10 # h3t
srdi 9, 8, 12
and 8, 11, 12 # h3
and 9, 9, 12 # h4
std 5, 0(3)
std 6, 8(3)
std 7, 16(3)
std 8, 24(3)
std 9, 32(3)
blr
SYM_FUNC_END(x25519_fe51_frombytes)
.align 5
SYM_FUNC_START(x25519_fe51_tobytes)
ld 5, 0(4)
ld 6, 8(4)
ld 7, 16(4)
ld 8, 24(4)
ld 9, 32(4)
li 12, -1
srdi 12, 12, 13 # 0x7ffffffffffff
# Full reduction
addi 10, 5, 19
srdi 10, 10, 51
add 10, 10, 6
srdi 10, 10, 51
add 10, 10, 7
srdi 10, 10, 51
add 10, 10, 8
srdi 10, 10, 51
add 10, 10, 9
srdi 10, 10, 51
mulli 10, 10, 19
add 5, 5, 10
srdi 11, 5, 51
add 6, 6, 11
srdi 11, 6, 51
add 7, 7, 11
srdi 11, 7, 51
add 8, 8, 11
srdi 11, 8, 51
add 9, 9, 11
and 5, 5, 12
and 6, 6, 12
and 7, 7, 12
and 8, 8, 12
and 9, 9, 12
sldi 10, 6, 51
or 5, 5, 10 # s0
srdi 11, 6, 13
sldi 10, 7, 38
or 6, 11, 10 # s1
srdi 11, 7, 26
sldi 10, 8, 25
or 7, 11, 10 # s2
srdi 11, 8, 39
sldi 10, 9, 12
or 8, 11, 10 # s4
std 5, 0(3)
std 6, 8(3)
std 7, 16(3)
std 8, 24(3)
blr
SYM_FUNC_END(x25519_fe51_tobytes)
.align 5
SYM_FUNC_START(x25519_cswap)
li 7, 5
neg 6, 5
mtctr 7
.Lswap_loop:
ld 8, 0(3)
ld 9, 0(4)
xor 10, 8, 9
and 10, 10, 6
xor 11, 8, 10
xor 12, 9, 10
std 11, 0(3)
addi 3, 3, 8
std 12, 0(4)
addi 4, 4, 8
bdnz .Lswap_loop
blr
SYM_FUNC_END(x25519_cswap)
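The last routine in the file is the ladder's constant-time conditional swap. A rough C rendering of what the assembly does (an illustration, not code from the patch):

#include <linux/types.h>

/*
 * Swap the two 5-limb field elements iff bit is 1, without branching on
 * the secret bit: the mask is built the same way the assembly does it
 * ("neg 6,5"), then each limb pair goes through the xor/and/xor swap
 * under a ctr loop of 5.
 */
static void x25519_cswap_sketch(u64 p[5], u64 q[5], unsigned int bit)
{
	u64 mask = 0ULL - (u64)bit;	/* all ones if bit == 1, else 0 */
	int i;

	for (i = 0; i < 5; i++) {
		u64 t = (p[i] ^ q[i]) & mask;

		p[i] ^= t;
		q[i] ^= t;
	}
}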

View file

@ -18,6 +18,7 @@ config CRYPTO_AES_NI_INTEL
depends on X86
select CRYPTO_AEAD
select CRYPTO_LIB_AES
select CRYPTO_LIB_GF128MUL
select CRYPTO_ALGAPI
select CRYPTO_SKCIPHER
select CRYPTO_SIMD

View file

@ -48,8 +48,12 @@ chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o \
aes_ctrby8_avx-x86_64.o aes-xts-avx-x86_64.o
aesni-intel-$(CONFIG_64BIT) += aes_ctrby8_avx-x86_64.o \
aes-gcm-aesni-x86_64.o \
aes-xts-avx-x86_64.o
ifeq ($(CONFIG_AS_VAES)$(CONFIG_AS_VPCLMULQDQ),yy)
aesni-intel-$(CONFIG_64BIT) += aes-gcm-avx10-x86_64.o
endif
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -195,6 +195,7 @@ module_init(crc32_pclmul_mod_init);
module_exit(crc32_pclmul_mod_fini);
MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
MODULE_DESCRIPTION("CRC32 algorithm (IEEE 802.3) accelerated with PCLMULQDQ");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32");

View file

@ -1720,5 +1720,6 @@ module_exit(curve25519_mod_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-x86");
MODULE_DESCRIPTION("Curve25519 algorithm, ADX optimized");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");

View file

@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>
asmlinkage void poly1305_init_x86_64(void *ctx,
@ -269,7 +269,7 @@ static int __init poly1305_simd_mod_init(void)
boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) &&
cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) &&
/* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */
boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X)
boot_cpu_data.x86_vfm != INTEL_SKYLAKE_X)
static_branch_enable(&poly1305_use_avx512);
return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0;
}
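The include and CPU-check changes here follow the newer <asm/cpu_device_id.h> convention: boot_cpu_data.x86_vfm packs vendor, family and model into one value, so comparing against INTEL_SKYLAKE_X replaces the old INTEL_FAM6_SKYLAKE_X test on x86_model with a single, unambiguous compare (my reading of the renamed constants; the Skylake-X downclocking workaround itself is unchanged).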

View file

@ -5,6 +5,7 @@
* Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*/
#include <asm/cpu_device_id.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <linux/crypto.h>
@ -107,10 +108,10 @@ static bool is_blacklisted_cpu(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
if (boot_cpu_data.x86 == 0x06 &&
(boot_cpu_data.x86_model == 0x1c ||
boot_cpu_data.x86_model == 0x26 ||
boot_cpu_data.x86_model == 0x36)) {
switch (boot_cpu_data.x86_vfm) {
case INTEL_ATOM_BONNELL:
case INTEL_ATOM_BONNELL_MID:
case INTEL_ATOM_SALTWELL:
/*
* On Atom, twofish-3way is slower than original assembler
* implementation. Twofish-3way trades off some performance in
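For reference, the three magic model numbers this switch replaces appear to map one-to-one onto the named constants (0x1c is Atom Bonnell, 0x26 Bonnell MID and 0x36 Saltwell in <asm/intel-family.h>), so the blacklist should cover the same CPUs as before, just spelled via x86_vfm.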

View file

@ -313,24 +313,6 @@ config CRYPTO_ECRDSA
One of the Russian cryptographic standard algorithms (called GOST
algorithms). Only signature verification is implemented.
config CRYPTO_SM2
tristate "SM2 (ShangMi 2)"
select CRYPTO_SM3
select CRYPTO_AKCIPHER
select CRYPTO_MANAGER
select MPILIB
select ASN1
help
SM2 (ShangMi 2) public key algorithm
Published by State Encryption Management Bureau, China,
as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
References:
https://datatracker.ietf.org/doc/draft-shen-sm2-ecdsa/
http://www.oscca.gov.cn/sca/xxgk/2010-12/17/content_1002386.shtml
http://www.gmbz.org.cn/main/bzlb.html
config CRYPTO_CURVE25519
tristate "Curve25519"
select CRYPTO_KPP

View file

@ -50,14 +50,6 @@ rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
$(obj)/sm2signature.asn1.o: $(obj)/sm2signature.asn1.c $(obj)/sm2signature.asn1.h
$(obj)/sm2.o: $(obj)/sm2signature.asn1.h
sm2_generic-y += sm2signature.asn1.o
sm2_generic-y += sm2.o
obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o
$(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h
$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h
ecdsa_generic-y += ecdsa.o

View file

@ -1317,5 +1317,6 @@ static void __exit af_alg_exit(void)
module_init(af_alg_init);
module_exit(af_alg_exit);
MODULE_DESCRIPTION("Crypto userspace interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_ALG);

View file

@ -1056,6 +1056,9 @@ EXPORT_SYMBOL_GPL(crypto_type_has_alg);
static void __init crypto_start_tests(void)
{
if (!IS_BUILTIN(CONFIG_CRYPTO_ALGAPI))
return;
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
return;

View file

@ -471,4 +471,5 @@ static void __exit algif_hash_exit(void)
module_init(algif_hash_init);
module_exit(algif_hash_exit);
MODULE_DESCRIPTION("Userspace interface for hash algorithms");
MODULE_LICENSE("GPL");

View file

@ -437,4 +437,5 @@ static void __exit algif_skcipher_exit(void)
module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_DESCRIPTION("Userspace interface for skcipher algorithms");
MODULE_LICENSE("GPL");

View file

@ -31,9 +31,9 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
#if IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) && \
!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
EXPORT_SYMBOL_GPL(__crypto_boot_test_finished);
#endif
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);

View file

@ -292,10 +292,6 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
ctx->sinfo->sig->pkey_algo = "ecdsa";
ctx->sinfo->sig->encoding = "x962";
break;
case OID_SM2_with_SM3:
ctx->sinfo->sig->pkey_algo = "sm2";
ctx->sinfo->sig->encoding = "raw";
break;
case OID_gost2012PKey256:
case OID_gost2012PKey512:
ctx->sinfo->sig->pkey_algo = "ecrdsa";

View file

@ -124,13 +124,6 @@ software_key_determine_akcipher(const struct public_key *pkey,
strcmp(hash_algo, "sha3-384") != 0 &&
strcmp(hash_algo, "sha3-512") != 0)
return -EINVAL;
} else if (strcmp(pkey->pkey_algo, "sm2") == 0) {
if (strcmp(encoding, "raw") != 0)
return -EINVAL;
if (!hash_algo)
return -EINVAL;
if (strcmp(hash_algo, "sm3") != 0)
return -EINVAL;
} else if (strcmp(pkey->pkey_algo, "ecrdsa") == 0) {
if (strcmp(encoding, "raw") != 0)
return -EINVAL;

View file

@ -257,10 +257,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
case OID_gost2012Signature512:
ctx->cert->sig->hash_algo = "streebog512";
goto ecrdsa;
case OID_SM2_with_SM3:
ctx->cert->sig->hash_algo = "sm3";
goto sm2;
}
rsa_pkcs1:
@ -273,11 +269,6 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->encoding = "raw";
ctx->sig_algo = ctx->last_oid;
return 0;
sm2:
ctx->cert->sig->pkey_algo = "sm2";
ctx->cert->sig->encoding = "raw";
ctx->sig_algo = ctx->last_oid;
return 0;
ecdsa:
ctx->cert->sig->pkey_algo = "ecdsa";
ctx->cert->sig->encoding = "x962";
@ -309,7 +300,6 @@ int x509_note_signature(void *context, size_t hdrlen,
if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 ||
strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) {
/* Discard the BIT STRING metadata */
if (vlen < 1 || *(const u8 *)value != 0)
@ -514,17 +504,11 @@ int x509_extract_key_data(void *context, size_t hdrlen,
case OID_gost2012PKey512:
ctx->cert->pub->pkey_algo = "ecrdsa";
break;
case OID_sm2:
ctx->cert->pub->pkey_algo = "sm2";
break;
case OID_id_ecPublicKey:
if (parse_OID(ctx->params, ctx->params_size, &oid) != 0)
return -EBADMSG;
switch (oid) {
case OID_sm2:
ctx->cert->pub->pkey_algo = "sm2";
break;
case OID_id_prime192v1:
ctx->cert->pub->pkey_algo = "ecdsa-nist-p192";
break;

View file

@ -7,7 +7,6 @@
#define pr_fmt(fmt) "X.509: "fmt
#include <crypto/hash.h>
#include <crypto/sm2.h>
#include <keys/asymmetric-parser.h>
#include <keys/asymmetric-subtype.h>
#include <keys/system_keyring.h>
@ -64,20 +63,8 @@ int x509_get_sig_params(struct x509_certificate *cert)
desc->tfm = tfm;
if (strcmp(cert->pub->pkey_algo, "sm2") == 0) {
ret = strcmp(sig->hash_algo, "sm3") != 0 ? -EINVAL :
crypto_shash_init(desc) ?:
sm2_compute_z_digest(desc, cert->pub->key,
cert->pub->keylen, sig->digest) ?:
crypto_shash_init(desc) ?:
crypto_shash_update(desc, sig->digest,
sig->digest_size) ?:
crypto_shash_finup(desc, cert->tbs, cert->tbs_size,
sig->digest);
} else {
ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size,
sig->digest);
}
ret = crypto_shash_digest(desc, cert->tbs, cert->tbs_size,
sig->digest);
if (ret < 0)
goto error_2;

View file

@ -282,4 +282,5 @@ __visible const u32 cast_s4[256] = {
};
EXPORT_SYMBOL_GPL(cast_s4);
MODULE_DESCRIPTION("Common lookup tables for CAST-128 (cast5) and CAST-256 (cast6)");
MODULE_LICENSE("GPL");

View file

@ -87,4 +87,5 @@ module_exit(curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-generic");
MODULE_DESCRIPTION("Curve25519 elliptic curve (RFC7748)");
MODULE_LICENSE("GPL");

View file

@ -311,3 +311,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_ALIAS_CRYPTO("deflate");
MODULE_ALIAS_CRYPTO("deflate-generic");

View file

@ -78,7 +78,7 @@ void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
/* diff > 0: not enough input bytes: set most significant digits to 0 */
if (diff > 0) {
ndigits -= diff;
memset(&out[ndigits - 1], 0, diff * sizeof(u64));
memset(&out[ndigits], 0, diff * sizeof(u64));
}
if (o) {
@ -1715,4 +1715,5 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
}
EXPORT_SYMBOL(crypto_ecdh_shared_secret);
MODULE_DESCRIPTION("core elliptic curve module");
MODULE_LICENSE("Dual BSD/GPL");

View file

@ -38,7 +38,6 @@ static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
size_t bufsize = ndigits * sizeof(u64);
ssize_t diff = vlen - bufsize;
const char *d = value;
u8 rs[ECC_MAX_BYTES];
if (!value || !vlen)
return -EINVAL;
@ -46,7 +45,7 @@ static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
/* diff = 0: 'value' has exactly the right size
* diff > 0: 'value' has too many bytes; one leading zero is allowed that
* makes the value a positive integer; error on more
* diff < 0: 'value' is missing leading zeros, which we add
* diff < 0: 'value' is missing leading zeros
*/
if (diff > 0) {
/* skip over leading zeros that make 'value' a positive int */
@ -61,14 +60,7 @@ static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
if (-diff >= bufsize)
return -EINVAL;
if (diff) {
/* leading zeros not given in 'value' */
memset(rs, 0, -diff);
}
memcpy(&rs[-diff], d, vlen);
ecc_swap_digits((u64 *)rs, dest, ndigits);
ecc_digits_from_bytes(d, vlen, dest, ndigits);
return 0;
}
@ -142,10 +134,8 @@ static int ecdsa_verify(struct akcipher_request *req)
struct ecdsa_signature_ctx sig_ctx = {
.curve = ctx->curve,
};
u8 rawhash[ECC_MAX_BYTES];
u64 hash[ECC_MAX_DIGITS];
unsigned char *buffer;
ssize_t diff;
int ret;
if (unlikely(!ctx->pub_key_set))
@ -164,18 +154,11 @@ static int ecdsa_verify(struct akcipher_request *req)
if (ret < 0)
goto error;
/* if the hash is shorter then we will add leading zeros to fit to ndigits */
diff = bufsize - req->dst_len;
if (diff >= 0) {
if (diff)
memset(rawhash, 0, diff);
memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
} else if (diff < 0) {
/* given hash is longer, we take the left-most bytes */
memcpy(&rawhash, buffer + req->src_len, bufsize);
}
if (bufsize > req->dst_len)
bufsize = req->dst_len;
ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
ecc_digits_from_bytes(buffer + req->src_len, bufsize,
hash, ctx->curve->g.ndigits);
ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
@ -215,9 +198,8 @@ static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
}
/*
* Set the public key given the raw uncompressed key data from an X509
* certificate. The key data contain the concatenated X and Y coordinates of
* the public key.
* Set the public ECC key as defined by RFC5480 section 2.2 "Subject Public
* Key". Only the uncompressed format is supported.
*/
static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
{

View file

@ -66,7 +66,8 @@ extern struct blocking_notifier_head crypto_chain;
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
#if !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) || \
IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
static inline bool crypto_boot_test_finished(void)
{
return true;
@ -84,7 +85,9 @@ static inline void set_crypto_boot_test_finished(void)
{
static_branch_enable(&__crypto_boot_test_finished);
}
#endif /* !CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
#endif /* !IS_BUILTIN(CONFIG_CRYPTO_ALGAPI) ||
* IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS)
*/
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);

View file

@ -523,4 +523,5 @@ void simd_unregister_aeads(struct aead_alg *algs, int count,
}
EXPORT_SYMBOL_GPL(simd_unregister_aeads);
MODULE_DESCRIPTION("Shared crypto SIMD helpers");
MODULE_LICENSE("GPL");

View file

@ -1,498 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SM2 asymmetric public-key algorithm
* as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and
* described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
*
* Copyright (c) 2020, Alibaba Group.
* Authors: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
*/
#include <linux/module.h>
#include <linux/mpi.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/rng.h>
#include <crypto/sm2.h>
#include "sm2signature.asn1.h"
/* The default user id as specified in GM/T 0009-2012 */
#define SM2_DEFAULT_USERID "1234567812345678"
#define SM2_DEFAULT_USERID_LEN 16
#define MPI_NBYTES(m) ((mpi_get_nbits(m) + 7) / 8)
struct ecc_domain_parms {
const char *desc; /* Description of the curve. */
unsigned int nbits; /* Number of bits. */
unsigned int fips:1; /* True if this is a FIPS140-2 approved curve */
/* The model describing this curve. This is mainly used to select
* the group equation.
*/
enum gcry_mpi_ec_models model;
/* The actual ECC dialect used. This is used for curve specific
* optimizations and to select encodings etc.
*/
enum ecc_dialects dialect;
const char *p; /* The prime defining the field. */
const char *a, *b; /* The coefficients. For Twisted Edwards
* Curves b is used for d. For Montgomery
* Curves (a,b) has ((A-2)/4,B^-1).
*/
const char *n; /* The order of the base point. */
const char *g_x, *g_y; /* Base point. */
unsigned int h; /* Cofactor. */
};
static const struct ecc_domain_parms sm2_ecp = {
.desc = "sm2p256v1",
.nbits = 256,
.fips = 0,
.model = MPI_EC_WEIERSTRASS,
.dialect = ECC_DIALECT_STANDARD,
.p = "0xfffffffeffffffffffffffffffffffffffffffff00000000ffffffffffffffff",
.a = "0xfffffffeffffffffffffffffffffffffffffffff00000000fffffffffffffffc",
.b = "0x28e9fa9e9d9f5e344d5a9e4bcf6509a7f39789f515ab8f92ddbcbd414d940e93",
.n = "0xfffffffeffffffffffffffffffffffff7203df6b21c6052b53bbf40939d54123",
.g_x = "0x32c4ae2c1f1981195f9904466a39c9948fe30bbff2660be1715a4589334c74c7",
.g_y = "0xbc3736a2f4f6779c59bdcee36b692153d0a9877cc62a474002df32e52139f0a0",
.h = 1
};
static int __sm2_set_pub_key(struct mpi_ec_ctx *ec,
const void *key, unsigned int keylen);
static int sm2_ec_ctx_init(struct mpi_ec_ctx *ec)
{
const struct ecc_domain_parms *ecp = &sm2_ecp;
MPI p, a, b;
MPI x, y;
int rc = -EINVAL;
p = mpi_scanval(ecp->p);
a = mpi_scanval(ecp->a);
b = mpi_scanval(ecp->b);
if (!p || !a || !b)
goto free_p;
x = mpi_scanval(ecp->g_x);
y = mpi_scanval(ecp->g_y);
if (!x || !y)
goto free;
rc = -ENOMEM;
ec->Q = mpi_point_new(0);
if (!ec->Q)
goto free;
/* mpi_ec_setup_elliptic_curve */
ec->G = mpi_point_new(0);
if (!ec->G) {
mpi_point_release(ec->Q);
goto free;
}
mpi_set(ec->G->x, x);
mpi_set(ec->G->y, y);
mpi_set_ui(ec->G->z, 1);
rc = -EINVAL;
ec->n = mpi_scanval(ecp->n);
if (!ec->n) {
mpi_point_release(ec->Q);
mpi_point_release(ec->G);
goto free;
}
ec->h = ecp->h;
ec->name = ecp->desc;
mpi_ec_init(ec, ecp->model, ecp->dialect, 0, p, a, b);
rc = 0;
free:
mpi_free(x);
mpi_free(y);
free_p:
mpi_free(p);
mpi_free(a);
mpi_free(b);
return rc;
}
static void sm2_ec_ctx_deinit(struct mpi_ec_ctx *ec)
{
mpi_ec_deinit(ec);
memset(ec, 0, sizeof(*ec));
}
/* RESULT must have been initialized and is set on success to the
* point given by VALUE.
*/
static int sm2_ecc_os2ec(MPI_POINT result, MPI value)
{
int rc;
size_t n;
unsigned char *buf;
MPI x, y;
n = MPI_NBYTES(value);
buf = kmalloc(n, GFP_KERNEL);
if (!buf)
return -ENOMEM;
rc = mpi_print(GCRYMPI_FMT_USG, buf, n, &n, value);
if (rc)
goto err_freebuf;
rc = -EINVAL;
if (n < 1 || ((n - 1) % 2))
goto err_freebuf;
/* No support for point compression */
if (*buf != 0x4)
goto err_freebuf;
rc = -ENOMEM;
n = (n - 1) / 2;
x = mpi_read_raw_data(buf + 1, n);
if (!x)
goto err_freebuf;
y = mpi_read_raw_data(buf + 1 + n, n);
if (!y)
goto err_freex;
mpi_normalize(x);
mpi_normalize(y);
mpi_set(result->x, x);
mpi_set(result->y, y);
mpi_set_ui(result->z, 1);
rc = 0;
mpi_free(y);
err_freex:
mpi_free(x);
err_freebuf:
kfree(buf);
return rc;
}
struct sm2_signature_ctx {
MPI sig_r;
MPI sig_s;
};
int sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct sm2_signature_ctx *sig = context;
if (!value || !vlen)
return -EINVAL;
sig->sig_r = mpi_read_raw_data(value, vlen);
if (!sig->sig_r)
return -ENOMEM;
return 0;
}
int sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct sm2_signature_ctx *sig = context;
if (!value || !vlen)
return -EINVAL;
sig->sig_s = mpi_read_raw_data(value, vlen);
if (!sig->sig_s)
return -ENOMEM;
return 0;
}
static int sm2_z_digest_update(struct shash_desc *desc,
MPI m, unsigned int pbytes)
{
static const unsigned char zero[32];
unsigned char *in;
unsigned int inlen;
int err;
in = mpi_get_buffer(m, &inlen, NULL);
if (!in)
return -EINVAL;
if (inlen < pbytes) {
/* padding with zero */
err = crypto_shash_update(desc, zero, pbytes - inlen) ?:
crypto_shash_update(desc, in, inlen);
} else if (inlen > pbytes) {
/* skip the starting zero */
err = crypto_shash_update(desc, in + inlen - pbytes, pbytes);
} else {
err = crypto_shash_update(desc, in, inlen);
}
kfree(in);
return err;
}
static int sm2_z_digest_update_point(struct shash_desc *desc,
MPI_POINT point, struct mpi_ec_ctx *ec,
unsigned int pbytes)
{
MPI x, y;
int ret = -EINVAL;
x = mpi_new(0);
y = mpi_new(0);
ret = mpi_ec_get_affine(x, y, point, ec) ? -EINVAL :
sm2_z_digest_update(desc, x, pbytes) ?:
sm2_z_digest_update(desc, y, pbytes);
mpi_free(x);
mpi_free(y);
return ret;
}
int sm2_compute_z_digest(struct shash_desc *desc,
const void *key, unsigned int keylen, void *dgst)
{
struct mpi_ec_ctx *ec;
unsigned int bits_len;
unsigned int pbytes;
u8 entl[2];
int err;
ec = kmalloc(sizeof(*ec), GFP_KERNEL);
if (!ec)
return -ENOMEM;
err = sm2_ec_ctx_init(ec);
if (err)
goto out_free_ec;
err = __sm2_set_pub_key(ec, key, keylen);
if (err)
goto out_deinit_ec;
bits_len = SM2_DEFAULT_USERID_LEN * 8;
entl[0] = bits_len >> 8;
entl[1] = bits_len & 0xff;
pbytes = MPI_NBYTES(ec->p);
/* ZA = H256(ENTLA | IDA | a | b | xG | yG | xA | yA) */
err = crypto_shash_init(desc);
if (err)
goto out_deinit_ec;
err = crypto_shash_update(desc, entl, 2);
if (err)
goto out_deinit_ec;
err = crypto_shash_update(desc, SM2_DEFAULT_USERID,
SM2_DEFAULT_USERID_LEN);
if (err)
goto out_deinit_ec;
err = sm2_z_digest_update(desc, ec->a, pbytes) ?:
sm2_z_digest_update(desc, ec->b, pbytes) ?:
sm2_z_digest_update_point(desc, ec->G, ec, pbytes) ?:
sm2_z_digest_update_point(desc, ec->Q, ec, pbytes);
if (err)
goto out_deinit_ec;
err = crypto_shash_final(desc, dgst);
out_deinit_ec:
sm2_ec_ctx_deinit(ec);
out_free_ec:
kfree(ec);
return err;
}
EXPORT_SYMBOL_GPL(sm2_compute_z_digest);
static int _sm2_verify(struct mpi_ec_ctx *ec, MPI hash, MPI sig_r, MPI sig_s)
{
int rc = -EINVAL;
struct gcry_mpi_point sG, tP;
MPI t = NULL;
MPI x1 = NULL, y1 = NULL;
mpi_point_init(&sG);
mpi_point_init(&tP);
x1 = mpi_new(0);
y1 = mpi_new(0);
t = mpi_new(0);
/* r, s in [1, n-1] */
if (mpi_cmp_ui(sig_r, 1) < 0 || mpi_cmp(sig_r, ec->n) > 0 ||
mpi_cmp_ui(sig_s, 1) < 0 || mpi_cmp(sig_s, ec->n) > 0) {
goto leave;
}
/* t = (r + s) % n, t == 0 */
mpi_addm(t, sig_r, sig_s, ec->n);
if (mpi_cmp_ui(t, 0) == 0)
goto leave;
/* sG + tP = (x1, y1) */
rc = -EBADMSG;
mpi_ec_mul_point(&sG, sig_s, ec->G, ec);
mpi_ec_mul_point(&tP, t, ec->Q, ec);
mpi_ec_add_points(&sG, &sG, &tP, ec);
if (mpi_ec_get_affine(x1, y1, &sG, ec))
goto leave;
/* R = (e + x1) % n */
mpi_addm(t, hash, x1, ec->n);
/* check R == r */
rc = -EKEYREJECTED;
if (mpi_cmp(t, sig_r))
goto leave;
rc = 0;
leave:
mpi_point_free_parts(&sG);
mpi_point_free_parts(&tP);
mpi_free(x1);
mpi_free(y1);
mpi_free(t);
return rc;
}
static int sm2_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
unsigned char *buffer;
struct sm2_signature_ctx sig;
MPI hash;
int ret;
if (unlikely(!ec->Q))
return -EINVAL;
buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src, req->src_len + req->dst_len),
buffer, req->src_len + req->dst_len, 0);
sig.sig_r = NULL;
sig.sig_s = NULL;
ret = asn1_ber_decoder(&sm2signature_decoder, &sig,
buffer, req->src_len);
if (ret)
goto error;
ret = -ENOMEM;
hash = mpi_read_raw_data(buffer + req->src_len, req->dst_len);
if (!hash)
goto error;
ret = _sm2_verify(ec, hash, sig.sig_r, sig.sig_s);
mpi_free(hash);
error:
mpi_free(sig.sig_r);
mpi_free(sig.sig_s);
kfree(buffer);
return ret;
}
static int sm2_set_pub_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
return __sm2_set_pub_key(ec, key, keylen);
}
static int __sm2_set_pub_key(struct mpi_ec_ctx *ec,
const void *key, unsigned int keylen)
{
MPI a;
int rc;
/* include the uncompressed flag '0x04' */
a = mpi_read_raw_data(key, keylen);
if (!a)
return -ENOMEM;
mpi_normalize(a);
rc = sm2_ecc_os2ec(ec->Q, a);
mpi_free(a);
return rc;
}
static unsigned int sm2_max_size(struct crypto_akcipher *tfm)
{
/* Unlimited max size */
return PAGE_SIZE;
}
static int sm2_init_tfm(struct crypto_akcipher *tfm)
{
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
return sm2_ec_ctx_init(ec);
}
static void sm2_exit_tfm(struct crypto_akcipher *tfm)
{
struct mpi_ec_ctx *ec = akcipher_tfm_ctx(tfm);
sm2_ec_ctx_deinit(ec);
}
static struct akcipher_alg sm2 = {
.verify = sm2_verify,
.set_pub_key = sm2_set_pub_key,
.max_size = sm2_max_size,
.init = sm2_init_tfm,
.exit = sm2_exit_tfm,
.base = {
.cra_name = "sm2",
.cra_driver_name = "sm2-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct mpi_ec_ctx),
},
};
static int __init sm2_init(void)
{
return crypto_register_akcipher(&sm2);
}
static void __exit sm2_exit(void)
{
crypto_unregister_akcipher(&sm2);
}
subsys_initcall(sm2_init);
module_exit(sm2_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tianjia Zhang <tianjia.zhang@linux.alibaba.com>");
MODULE_DESCRIPTION("SM2 generic algorithm");
MODULE_ALIAS_CRYPTO("sm2-generic");

View file

@ -1,4 +0,0 @@
Sm2Signature ::= SEQUENCE {
sig_r INTEGER ({ sm2_get_signature_r }),
sig_s INTEGER ({ sm2_get_signature_s })
}

View file

@ -2613,6 +2613,15 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
break;
case 600:
if (alg) {
u8 speed_template[2] = {klen, 0};
test_mb_skcipher_speed(alg, ENCRYPT, sec, NULL, 0,
speed_template, num_mb);
test_mb_skcipher_speed(alg, DECRYPT, sec, NULL, 0,
speed_template, num_mb);
break;
}
test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,

View file

@ -293,6 +293,10 @@ struct test_sg_division {
* the @key_offset
* @finalization_type: what finalization function to use for hashes
* @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
* This applies to the parts of the operation that aren't controlled
* individually by @nosimd_setkey or @src_divs[].nosimd.
* @nosimd_setkey: set the key (if applicable) with SIMD disabled? Requires
* !CRYPTO_TFM_REQ_MAY_SLEEP.
*/
struct testvec_config {
const char *name;
@ -306,6 +310,7 @@ struct testvec_config {
bool key_offset_relative_to_alignmask;
enum finalization_type finalization_type;
bool nosimd;
bool nosimd_setkey;
};
#define TESTVEC_CONFIG_NAMELEN 192
@ -533,7 +538,8 @@ static bool valid_testvec_config(const struct testvec_config *cfg)
cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
return false;
if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) &&
if ((cfg->nosimd || cfg->nosimd_setkey ||
(flags & SGDIVS_HAVE_NOSIMD)) &&
(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP))
return false;
@ -841,7 +847,10 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
return 0;
}
/* Like setkey_f(tfm, key, ksize), but sometimes misalign the key */
/*
* Like setkey_f(tfm, key, ksize), but sometimes misalign the key.
* In addition, run the setkey function in no-SIMD context if requested.
*/
#define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask) \
({ \
const u8 *keybuf, *keyptr; \
@ -850,7 +859,11 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
err = prepare_keybuf((key), (ksize), (cfg), (alignmask), \
&keybuf, &keyptr); \
if (err == 0) { \
if ((cfg)->nosimd_setkey) \
crypto_disable_simd_for_test(); \
err = setkey_f((tfm), keyptr, (ksize)); \
if ((cfg)->nosimd_setkey) \
crypto_reenable_simd_for_test(); \
kfree(keybuf); \
} \
err; \
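Taken together with the struct field added above, the nosimd_setkey flag makes do_setkey() bracket the ->setkey() call with crypto_disable_simd_for_test()/crypto_reenable_simd_for_test(), so an implementation's key-setup path gets exercised without SIMD even when the rest of the operation is allowed to use it. A hand-written configuration using the flag could look roughly like this, as it would sit inside testmgr.c (values invented for illustration; only field names visible in the hunks above are used):

static const struct testvec_config nosimd_setkey_demo_cfg = {
	.name = "one whole buffer, setkey without SIMD",
	.src_divs = { { .proportion_of_total = 10000 } },
	.nosimd_setkey = true,
};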
@ -903,14 +916,20 @@ static unsigned int generate_random_length(struct rnd_state *rng,
switch (prandom_u32_below(rng, 4)) {
case 0:
return len % 64;
len %= 64;
break;
case 1:
return len % 256;
len %= 256;
break;
case 2:
return len % 1024;
len %= 1024;
break;
default:
return len;
break;
}
if (len && prandom_u32_below(rng, 4) == 0)
len = rounddown_pow_of_two(len);
return len;
}
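The reworked helper now buckets the raw length and then, one time in four, rounds the survivor down to a power of two. Worked example: a raw len of 1000 landing in the second bucket becomes 1000 % 256 = 232, and with probability 1/4 that is turned into rounddown_pow_of_two(232) = 128, which is how this change makes power-of-two lengths show up much more often in the generated test configurations (matching the "generate power-of-2 lengths more often" entry in the shortlog).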
/* Flip a random bit in the given nonempty data buffer */
@ -1006,6 +1025,8 @@ static char *generate_random_sgl_divisions(struct rnd_state *rng,
if (div == &divs[max_divs - 1] || prandom_bool(rng))
this_len = remaining;
else if (prandom_u32_below(rng, 4) == 0)
this_len = (remaining + 1) / 2;
else
this_len = prandom_u32_inclusive(rng, 1, remaining);
div->proportion_of_total = this_len;
@ -1118,9 +1139,15 @@ static void generate_random_testvec_config(struct rnd_state *rng,
break;
}
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && prandom_bool(rng)) {
cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd");
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) {
if (prandom_bool(rng)) {
cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd");
}
if (prandom_bool(rng)) {
cfg->nosimd_setkey = true;
p += scnprintf(p, end - p, " nosimd_setkey");
}
}
p += scnprintf(p, end - p, " src_divs=[");
@ -5589,12 +5616,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.suite = {
.hash = __VECS(sha512_tv_template)
}
}, {
.alg = "sm2",
.test = alg_test_akcipher,
.suite = {
.akcipher = __VECS(sm2_tv_template)
}
}, {
.alg = "sm3",
.test = alg_test_hash,

View file

@ -5774,65 +5774,6 @@ static const struct hash_testvec hmac_streebog512_tv_template[] = {
},
};
/*
* SM2 test vectors.
*/
static const struct akcipher_testvec sm2_tv_template[] = {
{ /* Generated from openssl */
.key =
"\x04"
"\x8e\xa0\x33\x69\x91\x7e\x3d\xec\xad\x8e\xf0\x45\x5e\x13\x3e\x68"
"\x5b\x8c\xab\x5c\xc6\xc8\x50\xdf\x91\x00\xe0\x24\x73\x4d\x31\xf2"
"\x2e\xc0\xd5\x6b\xee\xda\x98\x93\xec\xd8\x36\xaa\xb9\xcf\x63\x82"
"\xef\xa7\x1a\x03\xed\x16\xba\x74\xb8\x8b\xf9\xe5\x70\x39\xa4\x70",
.key_len = 65,
.param_len = 0,
.c =
"\x30\x45"
"\x02\x20"
"\x70\xab\xb6\x7d\xd6\x54\x80\x64\x42\x7e\x2d\x05\x08\x36\xc9\x96"
"\x25\xc2\xbb\xff\x08\xe5\x43\x15\x5e\xf3\x06\xd9\x2b\x2f\x0a\x9f"
"\x02\x21"
"\x00"
"\xbf\x21\x5f\x7e\x5d\x3f\x1a\x4d\x8f\x84\xc2\xe9\xa6\x4c\xa4\x18"
"\xb2\xb8\x46\xf4\x32\x96\xfa\x57\xc6\x29\xd4\x89\xae\xcc\xda\xdb",
.c_size = 71,
.algo = OID_SM2_with_SM3,
.m =
"\x47\xa7\xbf\xd3\xda\xc4\x79\xee\xda\x8b\x4f\xe8\x40\x94\xd4\x32"
"\x8f\xf1\xcd\x68\x4d\xbd\x9b\x1d\xe0\xd8\x9a\x5d\xad\x85\x47\x5c",
.m_size = 32,
.public_key_vec = true,
.siggen_sigver_test = true,
},
{ /* From libgcrypt */
.key =
"\x04"
"\x87\x59\x38\x9a\x34\xaa\xad\x07\xec\xf4\xe0\xc8\xc2\x65\x0a\x44"
"\x59\xc8\xd9\x26\xee\x23\x78\x32\x4e\x02\x61\xc5\x25\x38\xcb\x47"
"\x75\x28\x10\x6b\x1e\x0b\x7c\x8d\xd5\xff\x29\xa9\xc8\x6a\x89\x06"
"\x56\x56\xeb\x33\x15\x4b\xc0\x55\x60\x91\xef\x8a\xc9\xd1\x7d\x78",
.key_len = 65,
.param_len = 0,
.c =
"\x30\x44"
"\x02\x20"
"\xd9\xec\xef\xe8\x5f\xee\x3c\x59\x57\x8e\x5b\xab\xb3\x02\xe1\x42"
"\x4b\x67\x2c\x0b\x26\xb6\x51\x2c\x3e\xfc\xc6\x49\xec\xfe\x89\xe5"
"\x02\x20"
"\x43\x45\xd0\xa5\xff\xe5\x13\x27\x26\xd0\xec\x37\xad\x24\x1e\x9a"
"\x71\x9a\xa4\x89\xb0\x7e\x0f\xc4\xbb\x2d\x50\xd0\xe5\x7f\x7a\x68",
.c_size = 70,
.algo = OID_SM2_with_SM3,
.m =
"\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff\x00"
"\x12\x34\x56\x78\x9a\xbc\xde\xf0\x12\x34\x56\x78\x9a\xbc\xde\xf0",
.m_size = 32,
.public_key_vec = true,
.siggen_sigver_test = true,
},
};
/* Example vectors below taken from
* http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
*

View file

@ -165,6 +165,7 @@ calibrate_xor_blocks(void)
static __exit void xor_exit(void) { }
MODULE_DESCRIPTION("RAID-5 checksumming functions");
MODULE_LICENSE("GPL");
#ifndef MODULE

View file

@ -555,7 +555,6 @@ config HW_RANDOM_ARM_SMCCC_TRNG
config HW_RANDOM_CN10K
tristate "Marvell CN10K Random Number Generator support"
depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
default HW_RANDOM
help
This driver provides support for the True Random Number
generator available in Marvell CN10K SoCs.


@ -143,8 +143,10 @@ static int __init amd_rng_mod_init(void)
found:
err = pci_read_config_dword(pdev, 0x58, &pmbase);
if (err)
if (err) {
err = pcibios_err_to_errno(err);
goto put_dev;
}
pmbase &= 0x0000FF00;
if (pmbase == 0) {


@ -118,4 +118,5 @@ module_platform_driver(smccc_trng_driver);
MODULE_ALIAS("platform:smccc_trng");
MODULE_AUTHOR("Andre Przywara");
MODULE_DESCRIPTION("Arm SMCCC TRNG firmware interface support");
MODULE_LICENSE("GPL");


@ -266,4 +266,5 @@ static struct pci_driver cavium_rng_vf_driver = {
module_pci_driver(cavium_rng_vf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium ThunderX Random Number Generator VF support");
MODULE_LICENSE("GPL v2");


@ -88,4 +88,5 @@ static struct pci_driver cavium_rng_pf_driver = {
module_pci_driver(cavium_rng_pf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium ThunderX Random Number Generator support");
MODULE_LICENSE("GPL v2");


@ -161,7 +161,6 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
current_quality = rng->quality; /* obsolete */
return 0;
@ -470,16 +469,6 @@ static struct attribute *rng_dev_attrs[] = {
ATTRIBUTE_GROUPS(rng_dev);
static void __exit unregister_miscdev(void)
{
misc_deregister(&rng_miscdev);
}
static int __init register_miscdev(void)
{
return misc_register(&rng_miscdev);
}
static int hwrng_fillfn(void *unused)
{
size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
@ -545,6 +534,9 @@ int hwrng_register(struct hwrng *rng)
complete(&rng->cleanup_done);
init_completion(&rng->dying);
/* Adjust quality field to always have a proper value */
rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
/*
@ -668,7 +660,7 @@ static int __init hwrng_modinit(void)
return -ENOMEM;
}
ret = register_miscdev();
ret = misc_register(&rng_miscdev);
if (ret) {
kfree(rng_fillbuf);
kfree(rng_buffer);
@ -685,7 +677,7 @@ static void __exit hwrng_modexit(void)
kfree(rng_fillbuf);
mutex_unlock(&rng_mutex);
unregister_miscdev();
misc_deregister(&rng_miscdev);
}
fs_initcall(hwrng_modinit); /* depends on misc_register() */


@ -10,6 +10,7 @@
* Krzysztof Kozłowski <krzk@kernel.org>
*/
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
@ -22,46 +23,69 @@
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#define EXYNOS_TRNG_CLKDIV (0x0)
#define EXYNOS_TRNG_CLKDIV 0x0
#define EXYNOS_TRNG_CTRL (0x20)
#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
#define EXYNOS_TRNG_CTRL 0x20
#define EXYNOS_TRNG_CTRL_RNGEN BIT(31)
#define EXYNOS_TRNG_POST_CTRL (0x30)
#define EXYNOS_TRNG_ONLINE_CTRL (0x40)
#define EXYNOS_TRNG_ONLINE_STAT (0x44)
#define EXYNOS_TRNG_ONLINE_MAXCHI2 (0x48)
#define EXYNOS_TRNG_FIFO_CTRL (0x50)
#define EXYNOS_TRNG_FIFO_0 (0x80)
#define EXYNOS_TRNG_FIFO_1 (0x84)
#define EXYNOS_TRNG_FIFO_2 (0x88)
#define EXYNOS_TRNG_FIFO_3 (0x8c)
#define EXYNOS_TRNG_FIFO_4 (0x90)
#define EXYNOS_TRNG_FIFO_5 (0x94)
#define EXYNOS_TRNG_FIFO_6 (0x98)
#define EXYNOS_TRNG_FIFO_7 (0x9c)
#define EXYNOS_TRNG_FIFO_LEN (8)
#define EXYNOS_TRNG_CLOCK_RATE (500000)
#define EXYNOS_TRNG_POST_CTRL 0x30
#define EXYNOS_TRNG_ONLINE_CTRL 0x40
#define EXYNOS_TRNG_ONLINE_STAT 0x44
#define EXYNOS_TRNG_ONLINE_MAXCHI2 0x48
#define EXYNOS_TRNG_FIFO_CTRL 0x50
#define EXYNOS_TRNG_FIFO_0 0x80
#define EXYNOS_TRNG_FIFO_1 0x84
#define EXYNOS_TRNG_FIFO_2 0x88
#define EXYNOS_TRNG_FIFO_3 0x8c
#define EXYNOS_TRNG_FIFO_4 0x90
#define EXYNOS_TRNG_FIFO_5 0x94
#define EXYNOS_TRNG_FIFO_6 0x98
#define EXYNOS_TRNG_FIFO_7 0x9c
#define EXYNOS_TRNG_FIFO_LEN 8
#define EXYNOS_TRNG_CLOCK_RATE 500000
/* Driver feature flags */
#define EXYNOS_SMC BIT(0)
#define EXYNOS_SMC_CALL_VAL(func_num) \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_32, \
ARM_SMCCC_OWNER_SIP, \
func_num)
/* SMC command for DTRNG access */
#define SMC_CMD_RANDOM EXYNOS_SMC_CALL_VAL(0x1012)
/* SMC_CMD_RANDOM: arguments */
#define HWRNG_INIT 0x0
#define HWRNG_EXIT 0x1
#define HWRNG_GET_DATA 0x2
#define HWRNG_RESUME 0x3
/* SMC_CMD_RANDOM: return values */
#define HWRNG_RET_OK 0x0
#define HWRNG_RET_RETRY_ERROR 0x2
#define HWRNG_MAX_TRIES 100
struct exynos_trng_dev {
struct device *dev;
void __iomem *mem;
struct clk *clk;
struct hwrng rng;
struct device *dev;
void __iomem *mem;
struct clk *clk; /* operating clock */
struct clk *pclk; /* bus clock */
struct hwrng rng;
unsigned long flags;
};
static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
bool wait)
static int exynos_trng_do_read_reg(struct hwrng *rng, void *data, size_t max,
bool wait)
{
struct exynos_trng_dev *trng;
struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
int val;
max = min_t(size_t, max, (EXYNOS_TRNG_FIFO_LEN * 4));
trng = (struct exynos_trng_dev *)rng->priv;
writel_relaxed(max * 8, trng->mem + EXYNOS_TRNG_FIFO_CTRL);
val = readl_poll_timeout(trng->mem + EXYNOS_TRNG_FIFO_CTRL, val,
val == 0, 200, 1000000);
@ -73,7 +97,40 @@ static int exynos_trng_do_read(struct hwrng *rng, void *data, size_t max,
return max;
}
static int exynos_trng_init(struct hwrng *rng)
static int exynos_trng_do_read_smc(struct hwrng *rng, void *data, size_t max,
bool wait)
{
struct arm_smccc_res res;
unsigned int copied = 0;
u32 *buf = data;
int tries = 0;
while (copied < max) {
arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_GET_DATA, 0, 0, 0, 0, 0, 0,
&res);
switch (res.a0) {
case HWRNG_RET_OK:
*buf++ = res.a2;
*buf++ = res.a3;
copied += 8;
tries = 0;
break;
case HWRNG_RET_RETRY_ERROR:
if (!wait)
return copied;
if (++tries >= HWRNG_MAX_TRIES)
return copied;
cond_resched();
break;
default:
return -EIO;
}
}
return copied;
}
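Each SMC_CMD_RANDOM/HWRNG_GET_DATA round trip reports its status in a0 and, on success, hands back two 32-bit random words in a2 and a3, which is why the loop advances copied by 8; a blocking read returns whatever it has gathered after HWRNG_MAX_TRIES consecutive HWRNG_RET_RETRY_ERROR results. A minimal sketch of the calling convention, mirroring the loop above (illustrative only):

	struct arm_smccc_res res;
	u32 word[2];

	arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_GET_DATA, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == HWRNG_RET_OK) {
		word[0] = res.a2;	/* first random word  */
		word[1] = res.a3;	/* second random word */
	}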
static int exynos_trng_init_reg(struct hwrng *rng)
{
struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
unsigned long sss_rate;
@ -87,7 +144,7 @@ static int exynos_trng_init(struct hwrng *rng)
*/
val = sss_rate / (EXYNOS_TRNG_CLOCK_RATE * 2);
if (val > 0x7fff) {
dev_err(trng->dev, "clock divider too large: %d", val);
dev_err(trng->dev, "clock divider too large: %d\n", val);
return -ERANGE;
}
val = val << 1;
@ -106,6 +163,24 @@ static int exynos_trng_init(struct hwrng *rng)
return 0;
}
static int exynos_trng_init_smc(struct hwrng *rng)
{
struct exynos_trng_dev *trng = (struct exynos_trng_dev *)rng->priv;
struct arm_smccc_res res;
int ret = 0;
arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0, 0, 0, 0, 0, &res);
if (res.a0 != HWRNG_RET_OK) {
dev_err(trng->dev, "SMC command for TRNG init failed (%d)\n",
(int)res.a0);
ret = -EIO;
}
if ((int)res.a0 == -1)
dev_info(trng->dev, "Make sure LDFW is loaded by your BL\n");
return ret;
}
static int exynos_trng_probe(struct platform_device *pdev)
{
struct exynos_trng_dev *trng;
@ -115,21 +190,29 @@ static int exynos_trng_probe(struct platform_device *pdev)
if (!trng)
return ret;
platform_set_drvdata(pdev, trng);
trng->dev = &pdev->dev;
trng->flags = (unsigned long)device_get_match_data(&pdev->dev);
trng->rng.name = devm_kstrdup(&pdev->dev, dev_name(&pdev->dev),
GFP_KERNEL);
if (!trng->rng.name)
return ret;
trng->rng.init = exynos_trng_init;
trng->rng.read = exynos_trng_do_read;
trng->rng.priv = (unsigned long) trng;
trng->rng.priv = (unsigned long)trng;
platform_set_drvdata(pdev, trng);
trng->dev = &pdev->dev;
if (trng->flags & EXYNOS_SMC) {
trng->rng.init = exynos_trng_init_smc;
trng->rng.read = exynos_trng_do_read_smc;
} else {
trng->rng.init = exynos_trng_init_reg;
trng->rng.read = exynos_trng_do_read_reg;
trng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->mem))
return PTR_ERR(trng->mem);
trng->mem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->mem))
return PTR_ERR(trng->mem);
}
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
@ -138,32 +221,30 @@ static int exynos_trng_probe(struct platform_device *pdev)
goto err_pm_get;
}
trng->clk = devm_clk_get(&pdev->dev, "secss");
trng->clk = devm_clk_get_enabled(&pdev->dev, "secss");
if (IS_ERR(trng->clk)) {
ret = PTR_ERR(trng->clk);
dev_err(&pdev->dev, "Could not get clock.\n");
ret = dev_err_probe(&pdev->dev, PTR_ERR(trng->clk),
"Could not get clock\n");
goto err_clock;
}
ret = clk_prepare_enable(trng->clk);
if (ret) {
dev_err(&pdev->dev, "Could not enable the clk.\n");
trng->pclk = devm_clk_get_optional_enabled(&pdev->dev, "pclk");
if (IS_ERR(trng->pclk)) {
ret = dev_err_probe(&pdev->dev, PTR_ERR(trng->pclk),
"Could not get pclk\n");
goto err_clock;
}
ret = devm_hwrng_register(&pdev->dev, &trng->rng);
if (ret) {
dev_err(&pdev->dev, "Could not register hwrng device.\n");
goto err_register;
goto err_clock;
}
dev_info(&pdev->dev, "Exynos True Random Number Generator.\n");
return 0;
err_register:
clk_disable_unprepare(trng->clk);
err_clock:
pm_runtime_put_noidle(&pdev->dev);
@ -175,9 +256,14 @@ static int exynos_trng_probe(struct platform_device *pdev)
static void exynos_trng_remove(struct platform_device *pdev)
{
struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
struct exynos_trng_dev *trng = platform_get_drvdata(pdev);
clk_disable_unprepare(trng->clk);
if (trng->flags & EXYNOS_SMC) {
struct arm_smccc_res res;
arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0, 0, 0, 0, 0,
&res);
}
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@ -185,6 +271,16 @@ static void exynos_trng_remove(struct platform_device *pdev)
static int exynos_trng_suspend(struct device *dev)
{
struct exynos_trng_dev *trng = dev_get_drvdata(dev);
struct arm_smccc_res res;
if (trng->flags & EXYNOS_SMC) {
arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_EXIT, 0, 0, 0, 0, 0, 0,
&res);
if (res.a0 != HWRNG_RET_OK)
return -EIO;
}
pm_runtime_put_sync(dev);
return 0;
@ -192,6 +288,7 @@ static int exynos_trng_suspend(struct device *dev)
static int exynos_trng_resume(struct device *dev)
{
struct exynos_trng_dev *trng = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_resume_and_get(dev);
@ -200,15 +297,32 @@ static int exynos_trng_resume(struct device *dev)
return ret;
}
if (trng->flags & EXYNOS_SMC) {
struct arm_smccc_res res;
arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_RESUME, 0, 0, 0, 0, 0, 0,
&res);
if (res.a0 != HWRNG_RET_OK)
return -EIO;
arm_smccc_smc(SMC_CMD_RANDOM, HWRNG_INIT, 0, 0, 0, 0, 0, 0,
&res);
if (res.a0 != HWRNG_RET_OK)
return -EIO;
}
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
exynos_trng_resume);
exynos_trng_resume);
static const struct of_device_id exynos_trng_dt_match[] = {
{
.compatible = "samsung,exynos5250-trng",
}, {
.compatible = "samsung,exynos850-trng",
.data = (void *)EXYNOS_SMC,
},
{ },
};
@ -225,6 +339,7 @@ static struct platform_driver exynos_trng_driver = {
};
module_platform_driver(exynos_trng_driver);
MODULE_AUTHOR("Łukasz Stelmach");
MODULE_DESCRIPTION("H/W TRNG driver for Exynos chips");
MODULE_LICENSE("GPL v2");


@ -564,4 +564,5 @@ static struct platform_driver omap_rng_driver = {
module_platform_driver(omap_rng_driver);
MODULE_ALIAS("platform:omap_rng");
MODULE_AUTHOR("Deepak Saxena (and others)");
MODULE_DESCRIPTION("RNG driver for TI OMAP CPU family");
MODULE_LICENSE("GPL");


@ -178,4 +178,5 @@ module_platform_driver(omap3_rom_rng_driver);
MODULE_ALIAS("platform:omap3-rom-rng");
MODULE_AUTHOR("Juha Yrjola");
MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("RNG driver for TI OMAP3 CPU family");
MODULE_LICENSE("GPL");


@ -70,6 +70,7 @@ struct stm32_rng_config {
struct stm32_rng_private {
struct hwrng rng;
struct device *dev;
void __iomem *base;
struct clk *clk;
struct reset_control *rst;
@ -99,7 +100,7 @@ struct stm32_rng_private {
*/
static int stm32_rng_conceal_seed_error_cond_reset(struct stm32_rng_private *priv)
{
struct device *dev = (struct device *)priv->rng.priv;
struct device *dev = priv->dev;
u32 sr = readl_relaxed(priv->base + RNG_SR);
u32 cr = readl_relaxed(priv->base + RNG_CR);
int err;
@ -171,7 +172,7 @@ static int stm32_rng_conceal_seed_error(struct hwrng *rng)
{
struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng);
dev_dbg((struct device *)priv->rng.priv, "Concealing seed error\n");
dev_dbg(priv->dev, "Concealing seed error\n");
if (priv->data->has_cond_reset)
return stm32_rng_conceal_seed_error_cond_reset(priv);
@ -187,7 +188,9 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
int retval = 0, err = 0;
u32 sr;
pm_runtime_get_sync((struct device *) priv->rng.priv);
retval = pm_runtime_resume_and_get(priv->dev);
if (retval)
return retval;
if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
stm32_rng_conceal_seed_error(rng);
@ -204,8 +207,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
sr, sr,
10, 50000);
if (err) {
dev_err((struct device *)priv->rng.priv,
"%s: timeout %x!\n", __func__, sr);
dev_err(priv->dev, "%s: timeout %x!\n", __func__, sr);
break;
}
} else if (!sr) {
@ -218,8 +220,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
err = stm32_rng_conceal_seed_error(rng);
i++;
if (err && i > RNG_NB_RECOVER_TRIES) {
dev_err((struct device *)priv->rng.priv,
"Couldn't recover from seed error\n");
dev_err(priv->dev, "Couldn't recover from seed error\n");
retval = -ENOTRECOVERABLE;
goto exit_rpm;
}
@ -237,8 +238,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
err = stm32_rng_conceal_seed_error(rng);
i++;
if (err && i > RNG_NB_RECOVER_TRIES) {
dev_err((struct device *)priv->rng.priv,
"Couldn't recover from seed error");
dev_err(priv->dev, "Couldn't recover from seed error");
retval = -ENOTRECOVERABLE;
goto exit_rpm;
}
@ -253,8 +253,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
}
exit_rpm:
pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_sync_autosuspend(priv->dev);
return retval || !wait ? retval : -EIO;
}
@ -329,8 +329,7 @@ static int stm32_rng_init(struct hwrng *rng)
10, 50000);
if (err) {
clk_disable_unprepare(priv->clk);
dev_err((struct device *)priv->rng.priv,
"%s: timeout %x!\n", __func__, reg);
dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg);
return -EINVAL;
}
} else {
@ -358,8 +357,7 @@ static int stm32_rng_init(struct hwrng *rng)
10, 100000);
if (err || (reg & ~RNG_SR_DRDY)) {
clk_disable_unprepare(priv->clk);
dev_err((struct device *)priv->rng.priv,
"%s: timeout:%x SR: %x!\n", __func__, err, reg);
dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg);
return -EINVAL;
}
@ -465,8 +463,7 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
if (err) {
clk_disable_unprepare(priv->clk);
dev_err((struct device *)priv->rng.priv,
"%s: timeout:%x CR: %x!\n", __func__, err, reg);
dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg);
return -EINVAL;
}
} else {
@ -520,7 +517,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
struct stm32_rng_private *priv;
struct resource *res;
priv = devm_kzalloc(dev, sizeof(struct stm32_rng_private), GFP_KERNEL);
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@ -541,6 +538,7 @@ static int stm32_rng_probe(struct platform_device *ofdev)
priv->ced = of_property_read_bool(np, "clock-error-detect");
priv->lock_conf = of_property_read_bool(np, "st,rng-lock-conf");
priv->dev = dev;
priv->data = of_device_get_match_data(dev);
if (!priv->data)
@ -551,7 +549,6 @@ static int stm32_rng_probe(struct platform_device *ofdev)
priv->rng.name = dev_driver_string(dev);
priv->rng.init = stm32_rng_init;
priv->rng.read = stm32_rng_read;
priv->rng.priv = (unsigned long) dev;
priv->rng.quality = 900;
pm_runtime_set_autosuspend_delay(dev, 100);


@ -190,7 +190,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
err = -EFAULT;
goto theend;
}
cet->t_key = cpu_to_le32(rctx->addr_key);
cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
@ -208,7 +208,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
err = -ENOMEM;
goto theend_iv;
}
cet->t_iv = cpu_to_le32(rctx->addr_iv);
cet->t_iv = desc_addr_val_le32(ce, rctx->addr_iv);
}
if (areq->src == areq->dst) {
@ -236,7 +236,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
len = areq->cryptlen;
for_each_sg(areq->src, sg, nr_sgs, i) {
cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
todo = min(len, sg_dma_len(sg));
cet->t_src[i].len = cpu_to_le32(todo / 4);
dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
@ -251,7 +251,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
len = areq->cryptlen;
for_each_sg(areq->dst, sg, nr_sgd, i) {
cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
cet->t_dst[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
todo = min(len, sg_dma_len(sg));
cet->t_dst[i].len = cpu_to_le32(todo / 4);
dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,


@ -92,6 +92,30 @@ static const struct ce_variant ce_h6_variant = {
.trng = CE_ALG_TRNG_V2,
};
static const struct ce_variant ce_h616_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
.alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256,
CE_ALG_SHA384, CE_ALG_SHA512
},
.op_mode = { CE_OP_ECB, CE_OP_CBC
},
.cipher_t_dlen_in_bytes = true,
.hash_t_dlen_in_bits = true,
.prng_t_dlen_in_bytes = true,
.trng_t_dlen_in_bytes = true,
.needs_word_addresses = true,
.ce_clks = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
{ "ram", 0, 400000000 },
{ "trng", 0, 0 },
},
.esr = ESR_H6,
.prng = CE_ALG_PRNG_V2,
.trng = CE_ALG_TRNG_V2,
};
static const struct ce_variant ce_a64_variant = {
.alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES,
},
@ -172,7 +196,7 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
writel(v, ce->base + CE_ICR);
reinit_completion(&ce->chanlist[flow].complete);
writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ);
writel(desc_addr_val(ce, ce->chanlist[flow].t_phy), ce->base + CE_TDQ);
ce->chanlist[flow].status = 0;
/* Be sure all data is written before enabling the task */
@ -1097,6 +1121,8 @@ static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
.data = &ce_h5_variant },
{ .compatible = "allwinner,sun50i-h6-crypto",
.data = &ce_h6_variant },
{ .compatible = "allwinner,sun50i-h616-crypto",
.data = &ce_h616_variant },
{}
};
MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);


@ -403,7 +403,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
len = areq->nbytes;
for_each_sg(areq->src, sg, nr_sgs, i) {
cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
cet->t_src[i].addr = desc_addr_val_le32(ce, sg_dma_address(sg));
todo = min(len, sg_dma_len(sg));
cet->t_src[i].len = cpu_to_le32(todo / 4);
len -= todo;
@ -414,7 +414,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
goto theend;
}
addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
cet->t_dst[0].addr = cpu_to_le32(addr_res);
cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
if (dma_mapping_error(ce->dev, addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
@ -445,7 +445,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
}
addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
cet->t_src[i].addr = cpu_to_le32(addr_pad);
cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
cet->t_src[i].len = cpu_to_le32(j);
if (dma_mapping_error(ce->dev, addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");


@ -132,10 +132,10 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
cet->t_sym_ctl = cpu_to_le32(sym);
cet->t_asym_ctl = 0;
cet->t_key = cpu_to_le32(dma_iv);
cet->t_iv = cpu_to_le32(dma_iv);
cet->t_key = desc_addr_val_le32(ce, dma_iv);
cet->t_iv = desc_addr_val_le32(ce, dma_iv);
cet->t_dst[0].addr = cpu_to_le32(dma_dst);
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
ce->chanlist[flow].timeout = 2000;


@ -77,7 +77,7 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
cet->t_sym_ctl = 0;
cet->t_asym_ctl = 0;
cet->t_dst[0].addr = cpu_to_le32(dma_dst);
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
ce->chanlist[flow].timeout = todo;


@ -149,6 +149,7 @@ struct ce_variant {
bool hash_t_dlen_in_bits;
bool prng_t_dlen_in_bytes;
bool trng_t_dlen_in_bytes;
bool needs_word_addresses;
struct ce_clock ce_clks[CE_MAX_CLOCKS];
int esr;
unsigned char prng;
@ -241,6 +242,20 @@ struct sun8i_ce_dev {
#endif
};
static inline u32 desc_addr_val(struct sun8i_ce_dev *dev, dma_addr_t addr)
{
if (dev->variant->needs_word_addresses)
return addr / 4;
return addr;
}
static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
dma_addr_t addr)
{
return cpu_to_le32(desc_addr_val(dev, addr));
}
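On variants that set needs_word_addresses (the H616 entry above), every address written into a task descriptor is expressed in 32-bit words rather than bytes; older variants pass the byte address through unchanged. A short illustration of the helpers, with a made-up DMA address:

	/* needs_word_addresses = true (H616):
	 *   dma_addr_t addr = 0x40001000;
	 *   desc_addr_val(ce, addr)      == 0x40001000 / 4 == 0x10000400
	 *   desc_addr_val_le32(ce, addr) == cpu_to_le32(0x10000400)
	 * Other variants return the byte address unchanged.
	 */
	cet->t_key = desc_addr_val_le32(ce, rctx->addr_key);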
/*
* struct sun8i_cipher_req_ctx - context for a skcipher request
* @op_dir: direction (encrypt vs decrypt) for this request


@ -106,7 +106,7 @@ static int atmel_sha204a_otp_read(struct i2c_client *client, u16 addr, u8 *otp)
if (cmd.data[0] == 0xff) {
dev_err(&client->dev, "failed, device not ready\n");
return -ret;
return -EINVAL;
}
memcpy(otp, cmd.data+1, 4);
@ -232,4 +232,5 @@ module_init(atmel_sha204a_init);
module_exit(atmel_sha204a_exit);
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_DESCRIPTION("Microchip / Atmel SHA204A (I2C) driver");
MODULE_LICENSE("GPL v2");


@ -2811,13 +2811,6 @@ static struct aead_alg aead_algos[] = {
#ifdef CONFIG_DEBUG_FS
struct dbgfs_u32 {
char *name;
mode_t mode;
u32 *flag;
char *desc;
};
static struct dentry *dbgfs_root;
static void artpec6_crypto_init_debugfs(void)


@ -12,7 +12,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
sev-dev.o \
tee-dev.o \
platform-access.o \
dbc.o
dbc.o \
hsti.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \


@ -223,7 +223,7 @@ int dbc_dev_init(struct psp_device *psp)
dbc_dev->dev = dev;
dbc_dev->psp = psp;
if (PSP_CAPABILITY(psp, DBC_THRU_EXT)) {
if (psp->capability.dbc_thru_ext) {
dbc_dev->use_ext = true;
dbc_dev->payload_size = &dbc_dev->mbox->ext_req.header.payload_size;
dbc_dev->result = &dbc_dev->mbox->ext_req.header.status;

drivers/crypto/ccp/hsti.c (new file, 138 lines)

@ -0,0 +1,138 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Secure Processor device driver, security attributes
*
* Copyright (C) 2023-2024 Advanced Micro Devices, Inc.
*
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
#include <linux/device.h>
#include "psp-dev.h"
#include "hsti.h"
#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8
struct hsti_request {
struct psp_req_buffer_hdr header;
u32 hsti;
} __packed;
#define security_attribute_show(name) \
static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
struct sp_device *sp = dev_get_drvdata(d); \
struct psp_device *psp = sp->psp_data; \
return sysfs_emit(buf, "%d\n", psp->capability.name); \
}
security_attribute_show(fused_part)
static DEVICE_ATTR_RO(fused_part);
security_attribute_show(debug_lock_on)
static DEVICE_ATTR_RO(debug_lock_on);
security_attribute_show(tsme_status)
static DEVICE_ATTR_RO(tsme_status);
security_attribute_show(anti_rollback_status)
static DEVICE_ATTR_RO(anti_rollback_status);
security_attribute_show(rpmc_production_enabled)
static DEVICE_ATTR_RO(rpmc_production_enabled);
security_attribute_show(rpmc_spirom_available)
static DEVICE_ATTR_RO(rpmc_spirom_available);
security_attribute_show(hsp_tpm_available)
static DEVICE_ATTR_RO(hsp_tpm_available);
security_attribute_show(rom_armor_enforced)
static DEVICE_ATTR_RO(rom_armor_enforced);
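For reference, each security_attribute_show() invocation above expands into a one-line sysfs show handler backed by the new capability bitfield; fused_part, for example, becomes roughly:

	static ssize_t fused_part_show(struct device *d, struct device_attribute *attr,
				       char *buf)
	{
		struct sp_device *sp = dev_get_drvdata(d);
		struct psp_device *psp = sp->psp_data;

		return sysfs_emit(buf, "%d\n", psp->capability.fused_part);
	}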
static struct attribute *psp_security_attrs[] = {
&dev_attr_fused_part.attr,
&dev_attr_debug_lock_on.attr,
&dev_attr_tsme_status.attr,
&dev_attr_anti_rollback_status.attr,
&dev_attr_rpmc_production_enabled.attr,
&dev_attr_rpmc_spirom_available.attr,
&dev_attr_hsp_tpm_available.attr,
&dev_attr_rom_armor_enforced.attr,
NULL
};
static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct sp_device *sp = dev_get_drvdata(dev);
struct psp_device *psp = sp->psp_data;
if (psp && psp->capability.security_reporting)
return 0444;
return 0;
}
struct attribute_group psp_security_attr_group = {
.attrs = psp_security_attrs,
.is_visible = psp_security_is_visible,
};
static int psp_poulate_hsti(struct psp_device *psp)
{
struct hsti_request *req;
int ret;
/* Are the security attributes already reported? */
if (psp->capability.security_reporting)
return 0;
/* Allocate command-response buffer */
req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_ZERO);
if (!req)
return -ENOMEM;
req->header.payload_size = sizeof(req);
ret = psp_send_platform_access_msg(PSP_CMD_HSTI_QUERY, (struct psp_request *)req);
if (ret)
goto out;
if (req->header.status != 0) {
dev_dbg(psp->dev, "failed to populate HSTI state: %d\n", req->header.status);
ret = -EINVAL;
goto out;
}
psp->capability.security_reporting = 1;
psp->capability.raw |= req->hsti << PSP_CAPABILITY_PSP_SECURITY_OFFSET;
out:
kfree(req);
return ret;
}
int psp_init_hsti(struct psp_device *psp)
{
int ret;
if (PSP_FEATURE(psp, HSTI)) {
ret = psp_poulate_hsti(psp);
if (ret)
return ret;
}
/*
* At this stage, if security information hasn't been populated by
* either the PSP or by the driver through the platform command,
* then there is nothing more to do.
*/
if (!psp->capability.security_reporting)
return 0;
if (psp->capability.tsme_status) {
if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
else
dev_notice(psp->dev, "psp: TSME enabled\n");
}
return 0;
}

drivers/crypto/ccp/hsti.h (new file, 17 lines)

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AMD Secure Processor device driver, security attributes
*
* Copyright (C) 2023-2024 Advanced Micro Devices, Inc.
*
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
#ifndef __HSTI_H
#define __HSTI_H
extern struct attribute_group psp_security_attr_group;
int psp_init_hsti(struct psp_device *psp);
#endif /* __HSTI_H */


@ -19,6 +19,7 @@
#include "tee-dev.h"
#include "platform-access.h"
#include "dbc.h"
#include "hsti.h"
struct psp_device *psp_master;
@ -154,16 +155,7 @@ static unsigned int psp_get_capability(struct psp_device *psp)
dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
return -ENODEV;
}
psp->capability = val;
/* Detect TSME and/or SME status */
if (PSP_CAPABILITY(psp, PSP_SECURITY_REPORTING) &&
psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET)) {
if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
else
dev_notice(psp->dev, "psp: TSME enabled\n");
}
psp->capability.raw = val;
return 0;
}
@ -171,7 +163,7 @@ static unsigned int psp_get_capability(struct psp_device *psp)
static int psp_check_sev_support(struct psp_device *psp)
{
/* Check if device supports SEV feature */
if (!PSP_CAPABILITY(psp, SEV)) {
if (!psp->capability.sev) {
dev_dbg(psp->dev, "psp does not support SEV\n");
return -ENODEV;
}
@ -182,7 +174,7 @@ static int psp_check_sev_support(struct psp_device *psp)
static int psp_check_tee_support(struct psp_device *psp)
{
/* Check if device supports TEE feature */
if (!PSP_CAPABILITY(psp, TEE)) {
if (!psp->capability.tee) {
dev_dbg(psp->dev, "psp does not support TEE\n");
return -ENODEV;
}
@ -214,12 +206,17 @@ static int psp_init(struct psp_device *psp)
/* dbc must come after platform access as it tests the feature */
if (PSP_FEATURE(psp, DBC) ||
PSP_CAPABILITY(psp, DBC_THRU_EXT)) {
psp->capability.dbc_thru_ext) {
ret = dbc_dev_init(psp);
if (ret)
return ret;
}
/* HSTI uses platform access on some systems. */
ret = psp_init_hsti(psp);
if (ret)
return ret;
return 0;
}


@ -26,6 +26,29 @@ extern struct psp_device *psp_master;
typedef void (*psp_irq_handler_t)(int, void *, unsigned int);
union psp_cap_register {
unsigned int raw;
struct {
unsigned int sev :1,
tee :1,
dbc_thru_ext :1,
rsvd1 :4,
security_reporting :1,
fused_part :1,
rsvd2 :1,
debug_lock_on :1,
rsvd3 :2,
tsme_status :1,
rsvd4 :1,
anti_rollback_status :1,
rpmc_production_enabled :1,
rpmc_spirom_available :1,
hsp_tpm_available :1,
rom_armor_enforced :1,
rsvd5 :12;
};
};
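The bitfield mirrors the flat encoding that the removed PSP_CAPABILITY/PSP_SECURITY defines described (bitfield ordering is compiler-defined, but GCC allocates low-to-high on the x86 targets this driver supports), so the raw register value decodes exactly as before and only the accessors change:

	/* old flat encoding                              new bitfield
	 *   PSP_CAPABILITY_SEV          BIT(0)       ->  capability.sev
	 *   PSP_CAPABILITY_TEE          BIT(1)       ->  capability.tee
	 *   PSP_CAPABILITY_DBC_THRU_EXT BIT(2)       ->  capability.dbc_thru_ext
	 *   PSP_SECURITY_FUSED_PART     BIT(0) << 8  ->  bit 8,  capability.fused_part
	 *   PSP_SECURITY_DEBUG_LOCK_ON  BIT(2) << 8  ->  bit 10, capability.debug_lock_on
	 *   PSP_SECURITY_TSME_STATUS    BIT(5) << 8  ->  bit 13, capability.tsme_status
	 */
	psp->capability.raw = val;	/* was: psp->capability = val;         */
	if (!psp->capability.sev)	/* was: if (!PSP_CAPABILITY(psp, SEV)) */
		return -ENODEV;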
struct psp_device {
struct list_head entry;
@ -46,7 +69,7 @@ struct psp_device {
void *platform_access_data;
void *dbc_data;
unsigned int capability;
union psp_cap_register capability;
};
void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
@ -55,27 +78,6 @@ void psp_clear_sev_irq_handler(struct psp_device *psp);
struct psp_device *psp_get_master_device(void);
#define PSP_CAPABILITY_SEV BIT(0)
#define PSP_CAPABILITY_TEE BIT(1)
#define PSP_CAPABILITY_DBC_THRU_EXT BIT(2)
#define PSP_CAPABILITY_PSP_SECURITY_REPORTING BIT(7)
#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8
/*
* The PSP doesn't directly store these bits in the capability register
* but instead copies them from the results of query command.
*
* The offsets from the query command are below, and shifted when used.
*/
#define PSP_SECURITY_FUSED_PART BIT(0)
#define PSP_SECURITY_DEBUG_LOCK_ON BIT(2)
#define PSP_SECURITY_TSME_STATUS BIT(5)
#define PSP_SECURITY_ANTI_ROLLBACK_STATUS BIT(7)
#define PSP_SECURITY_RPMC_PRODUCTION_ENABLED BIT(8)
#define PSP_SECURITY_RPMC_SPIROM_AVAILABLE BIT(9)
#define PSP_SECURITY_HSP_TPM_AVAILABLE BIT(10)
#define PSP_SECURITY_ROM_ARMOR_ENFORCED BIT(11)
/**
* enum psp_cmd - PSP mailbox commands
* @PSP_CMD_TEE_RING_INIT: Initialize TEE ring buffer


@ -1642,10 +1642,16 @@ static int sev_update_firmware(struct device *dev)
static int __sev_snp_shutdown_locked(int *error, bool panic)
{
struct sev_device *sev = psp_master->sev_data;
struct psp_device *psp = psp_master;
struct sev_device *sev;
struct sev_data_snp_shutdown_ex data;
int ret;
if (!psp || !psp->sev_data)
return 0;
sev = psp->sev_data;
if (!sev->snp_initialized)
return 0;


@ -29,8 +29,8 @@
#define CACHE_WB_NO_ALLOC 0xb7
#define PLATFORM_FEATURE_DBC 0x1
#define PLATFORM_FEATURE_HSTI 0x2
#define PSP_CAPABILITY(psp, cap) (psp->capability & PSP_CAPABILITY_##cap)
#define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat)
/* Structure to hold CCP device data */


@ -24,6 +24,7 @@
#include "ccp-dev.h"
#include "psp-dev.h"
#include "hsti.h"
/* used for version string AA.BB.CC.DD */
#define AA GENMASK(31, 24)
@ -39,62 +40,6 @@ struct sp_pci {
};
static struct sp_device *sp_dev_master;
#define security_attribute_show(name, def) \
static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
struct sp_device *sp = dev_get_drvdata(d); \
struct psp_device *psp = sp->psp_data; \
int bit = PSP_SECURITY_##def << PSP_CAPABILITY_PSP_SECURITY_OFFSET; \
return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \
}
security_attribute_show(fused_part, FUSED_PART)
static DEVICE_ATTR_RO(fused_part);
security_attribute_show(debug_lock_on, DEBUG_LOCK_ON)
static DEVICE_ATTR_RO(debug_lock_on);
security_attribute_show(tsme_status, TSME_STATUS)
static DEVICE_ATTR_RO(tsme_status);
security_attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
static DEVICE_ATTR_RO(anti_rollback_status);
security_attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
static DEVICE_ATTR_RO(rpmc_production_enabled);
security_attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
static DEVICE_ATTR_RO(rpmc_spirom_available);
security_attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
static DEVICE_ATTR_RO(hsp_tpm_available);
security_attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
static DEVICE_ATTR_RO(rom_armor_enforced);
static struct attribute *psp_security_attrs[] = {
&dev_attr_fused_part.attr,
&dev_attr_debug_lock_on.attr,
&dev_attr_tsme_status.attr,
&dev_attr_anti_rollback_status.attr,
&dev_attr_rpmc_production_enabled.attr,
&dev_attr_rpmc_spirom_available.attr,
&dev_attr_hsp_tpm_available.attr,
&dev_attr_rom_armor_enforced.attr,
NULL
};
static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct sp_device *sp = dev_get_drvdata(dev);
struct psp_device *psp = sp->psp_data;
if (psp && PSP_CAPABILITY(psp, PSP_SECURITY_REPORTING))
return 0444;
return 0;
}
static struct attribute_group psp_security_attr_group = {
.attrs = psp_security_attrs,
.is_visible = psp_security_is_visible,
};
#define version_attribute_show(name, _offset) \
static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
char *buf) \
@ -134,8 +79,7 @@ static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *a
psp->vdata->bootloader_info_reg)
val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg);
if (attr == &dev_attr_tee_version.attr &&
PSP_CAPABILITY(psp, TEE) &&
if (attr == &dev_attr_tee_version.attr && psp->capability.tee &&
psp->vdata->tee->info_reg)
val = ioread32(psp->io_regs + psp->vdata->tee->info_reg);
@ -152,7 +96,9 @@ static struct attribute_group psp_firmware_attr_group = {
};
static const struct attribute_group *psp_groups[] = {
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
&psp_security_attr_group,
#endif
&psp_firmware_attr_group,
NULL,
};
@ -451,10 +397,12 @@ static const struct psp_vdata pspv1 = {
static const struct psp_vdata pspv2 = {
.sev = &sevv2,
.platform_access = &pa_v1,
.bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
.platform_features = PLATFORM_FEATURE_HSTI,
};
static const struct psp_vdata pspv3 = {
@ -467,7 +415,8 @@ static const struct psp_vdata pspv3 = {
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
.platform_features = PLATFORM_FEATURE_DBC,
.platform_features = PLATFORM_FEATURE_DBC |
PLATFORM_FEATURE_HSTI,
};
static const struct psp_vdata pspv4 = {


@ -261,12 +261,6 @@ static void cc_cipher_exit(struct crypto_tfm *tfm)
kfree_sensitive(ctx_p->user.key);
}
struct tdes_keys {
u8 key1[DES_KEY_SIZE];
u8 key2[DES_KEY_SIZE];
u8 key3[DES_KEY_SIZE];
};
static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num)
{
switch (slot_num) {


@ -495,16 +495,6 @@ struct hifn_crypt_command {
#define HIFN_CRYPT_CMD_SRCLEN_M 0xc000
#define HIFN_CRYPT_CMD_SRCLEN_S 14
/*
* Structure to help build up the command data structure.
*/
struct hifn_mac_command {
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_MAC_CMD_ALG_MASK 0x0001
#define HIFN_MAC_CMD_ALG_SHA1 0x0000
#define HIFN_MAC_CMD_ALG_MD5 0x0001
@ -526,13 +516,6 @@ struct hifn_mac_command {
#define HIFN_MAC_CMD_POS_IPSEC 0x0200
#define HIFN_MAC_CMD_NEW_KEY 0x0800
struct hifn_comp_command {
volatile __le16 masks;
volatile __le16 header_skip;
volatile __le16 source_count;
volatile __le16 reserved;
};
#define HIFN_COMP_CMD_SRCLEN_M 0xc000
#define HIFN_COMP_CMD_SRCLEN_S 14
#define HIFN_COMP_CMD_ONE 0x0100 /* must be one */


@ -3793,14 +3793,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
qm->vfs_num = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
qm_clear_vft_config(qm);
goto err_put_sync;
}
qm->vfs_num = num_vfs;
pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
@ -3822,7 +3821,6 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
if (pci_vfs_assigned(pdev)) {
pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
@ -3837,13 +3835,10 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
pci_disable_sriov(pdev);
ret = qm_clear_vft_config(qm);
if (ret)
return ret;
qm->vfs_num = 0;
qm_pm_put_sync(qm);
return 0;
return qm_clear_vft_config(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);


@ -37,7 +37,7 @@
#define HZIP_QM_IDEL_STATUS 0x3040e4
#define HZIP_CORE_DFX_BASE 0x301000
#define HZIP_CLOCK_GATED_CONTL 0X301004
#define HZIP_CORE_DFX_DECOMP_BASE 0x304000
#define HZIP_CORE_DFX_COMP_0 0x302000
#define HZIP_CORE_DFX_COMP_1 0x303000
#define HZIP_CORE_DFX_DECOMP_0 0x304000
@ -48,6 +48,7 @@
#define HZIP_CORE_DFX_DECOMP_5 0x309000
#define HZIP_CORE_REGS_BASE_LEN 0xB0
#define HZIP_CORE_REGS_DFX_LEN 0x28
#define HZIP_CORE_ADDR_INTRVL 0x1000
#define HZIP_CORE_INT_SOURCE 0x3010A0
#define HZIP_CORE_INT_MASK_REG 0x3010A4
@ -269,28 +270,6 @@ static const u32 zip_pre_store_caps[] = {
ZIP_DEV_ALG_BITMAP,
};
enum {
HZIP_COMP_CORE0,
HZIP_COMP_CORE1,
HZIP_DECOMP_CORE0,
HZIP_DECOMP_CORE1,
HZIP_DECOMP_CORE2,
HZIP_DECOMP_CORE3,
HZIP_DECOMP_CORE4,
HZIP_DECOMP_CORE5,
};
static const u64 core_offsets[] = {
[HZIP_COMP_CORE0] = 0x302000,
[HZIP_COMP_CORE1] = 0x303000,
[HZIP_DECOMP_CORE0] = 0x304000,
[HZIP_DECOMP_CORE1] = 0x305000,
[HZIP_DECOMP_CORE2] = 0x306000,
[HZIP_DECOMP_CORE3] = 0x307000,
[HZIP_DECOMP_CORE4] = 0x308000,
[HZIP_DECOMP_CORE5] = 0x309000,
};
static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_GET_BD_NUM ", 0x00},
{"HZIP_GET_RIGHT_BD ", 0x04},
@ -807,6 +786,18 @@ static int hisi_zip_regs_show(struct seq_file *s, void *unused)
DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs);
static void __iomem *get_zip_core_addr(struct hisi_qm *qm, int core_num)
{
u32 zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val;
if (core_num < zip_comp_core_num)
return qm->io_base + HZIP_CORE_DFX_BASE +
(core_num + 1) * HZIP_CORE_ADDR_INTRVL;
return qm->io_base + HZIP_CORE_DFX_DECOMP_BASE +
(core_num - zip_comp_core_num) * HZIP_CORE_ADDR_INTRVL;
}
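A quick check of the new helper against the fixed offsets it replaces, assuming two compression cores as in the removed core_offsets[] table:

	/* offsets from qm->io_base:
	 *   get_zip_core_addr(qm, 0) -> 0x301000 + 1 * 0x1000 = 0x302000  (HZIP_CORE_DFX_COMP_0)
	 *   get_zip_core_addr(qm, 1) -> 0x301000 + 2 * 0x1000 = 0x303000  (HZIP_CORE_DFX_COMP_1)
	 *   get_zip_core_addr(qm, 2) -> 0x304000 + 0 * 0x1000 = 0x304000  (HZIP_CORE_DFX_DECOMP_0)
	 *   get_zip_core_addr(qm, 7) -> 0x304000 + 5 * 0x1000 = 0x309000  (HZIP_CORE_DFX_DECOMP_5)
	 */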
static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
u32 zip_core_num, zip_comp_core_num;
@ -832,7 +823,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm)
regset->regs = hzip_dfx_regs;
regset->nregs = ARRAY_SIZE(hzip_dfx_regs);
regset->base = qm->io_base + core_offsets[i];
regset->base = get_zip_core_addr(qm, i);
regset->dev = dev;
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
@ -921,13 +912,14 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm)
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
u32 zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val;
int i, j;
/* enable register read_clear bit */
writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE);
for (i = 0; i < ARRAY_SIZE(core_offsets); i++)
for (i = 0; i < zip_core_num; i++)
for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++)
readl(qm->io_base + core_offsets[i] +
readl(get_zip_core_addr(qm, i) +
hzip_dfx_regs[j].offset);
/* disable register read_clear bit */
@ -970,7 +962,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm)
}
for (i = 0; i < zip_core_num; i++) {
io_base = qm->io_base + core_offsets[i];
io_base = get_zip_core_addr(qm, i);
for (j = 0; j < core_dfx_regs_num; j++) {
idx = com_dfx_regs_num + i * core_dfx_regs_num + j;
debug->last_words[idx] = readl_relaxed(
@ -1022,7 +1014,7 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm)
else
scnprintf(buf, sizeof(buf), "Decomp_core-%d",
i - zip_comp_core_num);
base = qm->io_base + core_offsets[i];
base = get_zip_core_addr(qm, i);
pci_info(qm->pdev, "==>%s:\n", buf);
/* dump last word for dfx regs during control resetting */


@ -837,4 +837,5 @@ irqreturn_t ocs_hcu_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
MODULE_DESCRIPTION("Intel Keem Bay OCS HCU Crypto Driver");
MODULE_LICENSE("GPL");


@ -290,17 +290,19 @@ int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
* 3. if the key exists with the same value, then return without doing
* anything (the newly created key_val is freed).
*/
down_write(&cfg->lock);
if (!adf_cfg_key_val_get(accel_dev, section_name, key, temp_val)) {
if (strncmp(temp_val, key_val->val, sizeof(temp_val))) {
adf_cfg_keyval_remove(key, section);
} else {
kfree(key_val);
return 0;
goto out;
}
}
down_write(&cfg->lock);
adf_cfg_keyval_add(key_val, section);
out:
up_write(&cfg->lock);
return 0;
}
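Taking cfg->lock before the lookup turns the whole get/compare/remove/add sequence into one critical section; previously the lookup and removal ran unlocked and only the final insertion was protected. An illustrative interleaving (not taken from the commit) that the wider lock rules out:

	/* two writers adding the same key concurrently:
	 *   A: adf_cfg_key_val_get()  -> not found
	 *   B: adf_cfg_key_val_get()  -> not found
	 *   A: down_write(); adf_cfg_keyval_add(); up_write();
	 *   B: down_write(); adf_cfg_keyval_add(); up_write();
	 * With the lock taken up front, B's lookup observes A's insertion instead.
	 */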


@ -31,19 +31,22 @@ static const struct file_operations adf_ctl_ops = {
.compat_ioctl = compat_ptr_ioctl,
};
static const struct class adf_ctl_class = {
.name = DEVICE_NAME,
};
struct adf_ctl_drv_info {
unsigned int major;
struct cdev drv_cdev;
struct class *drv_class;
};
static struct adf_ctl_drv_info adf_ctl_drv;
static void adf_chr_drv_destroy(void)
{
device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
device_destroy(&adf_ctl_class, MKDEV(adf_ctl_drv.major, 0));
cdev_del(&adf_ctl_drv.drv_cdev);
class_destroy(adf_ctl_drv.drv_class);
class_unregister(&adf_ctl_class);
unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
}
@ -51,17 +54,17 @@ static int adf_chr_drv_create(void)
{
dev_t dev_id;
struct device *drv_device;
int ret;
if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
pr_err("QAT: unable to allocate chrdev region\n");
return -EFAULT;
}
adf_ctl_drv.drv_class = class_create(DEVICE_NAME);
if (IS_ERR(adf_ctl_drv.drv_class)) {
pr_err("QAT: class_create failed for adf_ctl\n");
ret = class_register(&adf_ctl_class);
if (ret)
goto err_chrdev_unreg;
}
adf_ctl_drv.major = MAJOR(dev_id);
cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
@ -69,7 +72,7 @@ static int adf_chr_drv_create(void)
goto err_class_destr;
}
drv_device = device_create(adf_ctl_drv.drv_class, NULL,
drv_device = device_create(&adf_ctl_class, NULL,
MKDEV(adf_ctl_drv.major, 0),
NULL, DEVICE_NAME);
if (IS_ERR(drv_device)) {
@ -80,7 +83,7 @@ static int adf_chr_drv_create(void)
err_cdev_del:
cdev_del(&adf_ctl_drv.drv_cdev);
err_class_destr:
class_destroy(adf_ctl_drv.drv_class);
class_unregister(&adf_ctl_class);
err_chrdev_unreg:
unregister_chrdev_region(dev_id, 1);
return -EFAULT;


@ -59,7 +59,7 @@ static int adf_get_vf_real_id(u32 fake)
}
/**
* adf_clean_vf_map() - Cleans VF id mapings
* adf_clean_vf_map() - Cleans VF id mappings
* @vf: flag indicating whether mappings is cleaned
* for vfs only or for vfs and pfs
*


@ -100,7 +100,9 @@ static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
/* Update only section of errmsk3 related to VF2PF */
errmsk3 &= ~ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK);
errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
/* Return the sources of the (new) interrupt(s) */


@ -1106,6 +1106,7 @@ int adf_rl_init(struct adf_accel_dev *accel_dev)
mutex_init(&rl->rl_lock);
rl->device_data = &accel_dev->hw_device->rl_data;
rl->accel_dev = accel_dev;
init_rwsem(&rl->user_input.lock);
accel_dev->rate_limiting = rl;
err_ret:


@ -193,8 +193,12 @@ static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr)
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);
errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
/* Update only section of errmsk3 and errmsk5 related to VF2PF */
errmsk3 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK);
errmsk5 &= ~ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK);
errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled);
errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3);
ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5);


@ -225,7 +225,8 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct skcipher_request *req, int init)
{
dma_addr_t key_phys, src_phys, dst_phys;
dma_addr_t key_phys = 0;
dma_addr_t src_phys, dst_phys;
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);


@ -720,10 +720,6 @@ static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tf
return container_of(alg, struct n2_skcipher_alg, skcipher);
}
struct n2_skcipher_request_context {
struct skcipher_walk walk;
};
static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
unsigned int keylen)
{


@ -2496,4 +2496,5 @@ static struct platform_driver sa_ul_driver = {
},
};
module_platform_driver(sa_ul_driver);
MODULE_DESCRIPTION("K3 SA2UL crypto accelerator driver");
MODULE_LICENSE("GPL v2");


@ -30,6 +30,7 @@
#define MAX_KEY_SIZE SHA512_BLOCK_SIZE
#define STARFIVE_AES_IV_LEN AES_BLOCK_SIZE
#define STARFIVE_AES_CTR_LEN AES_BLOCK_SIZE
#define STARFIVE_RSA_MAX_KEYSZ 256
union starfive_aes_csr {
u32 v;
@ -217,12 +218,11 @@ struct starfive_cryp_request_ctx {
struct scatterlist *out_sg;
struct ahash_request ahash_fbk_req;
size_t total;
size_t nents;
unsigned int blksize;
unsigned int digsize;
unsigned long in_sg_len;
unsigned char *adata;
u8 rsa_data[] __aligned(sizeof(u32));
u8 rsa_data[STARFIVE_RSA_MAX_KEYSZ] __aligned(sizeof(u32));
};
struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx);


@ -31,7 +31,6 @@
/* A * A * R mod N ==> A */
#define CRYPTO_CMD_AARN 0x7
#define STARFIVE_RSA_MAX_KEYSZ 256
#define STARFIVE_RSA_RESET 0x2
static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
@ -74,7 +73,7 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
{
struct starfive_cryp_dev *cryp = ctx->cryp;
struct starfive_cryp_request_ctx *rctx = ctx->rctx;
int count = rctx->total / sizeof(u32) - 1;
int count = (ALIGN(rctx->total, 4) / 4) - 1;
int loop;
u32 temp;
u8 opsize;
@ -251,12 +250,17 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
struct starfive_cryp_dev *cryp = ctx->cryp;
struct starfive_cryp_request_ctx *rctx = ctx->rctx;
struct starfive_rsa_key *key = &ctx->rsa_key;
int ret = 0;
int ret = 0, shift = 0;
writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
rctx->total = sg_copy_to_buffer(rctx->in_sg, rctx->nents,
rctx->rsa_data, rctx->total);
if (!IS_ALIGNED(rctx->total, sizeof(u32))) {
shift = sizeof(u32) - (rctx->total & 0x3);
memset(rctx->rsa_data, 0, shift);
}
rctx->total = sg_copy_to_buffer(rctx->in_sg, sg_nents(rctx->in_sg),
rctx->rsa_data + shift, rctx->total);
if (enc) {
key->bitlen = key->e_bitlen;
@ -305,7 +309,6 @@ static int starfive_rsa_enc(struct akcipher_request *req)
rctx->in_sg = req->src;
rctx->out_sg = req->dst;
rctx->total = req->src_len;
rctx->nents = sg_nents(rctx->in_sg);
ctx->rctx = rctx;
return starfive_rsa_enc_core(ctx, 1);


@ -11,8 +11,11 @@
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bottom_half.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
@ -40,6 +43,8 @@
/* Mode mask = bits [15..0] */
#define FLG_MODE_MASK GENMASK(15, 0)
/* Bit [31..16] status */
#define FLG_IN_OUT_DMA BIT(16)
#define FLG_HEADER_DMA BIT(17)
/* Registers */
#define CRYP_CR 0x00000000
@ -121,8 +126,12 @@
#define CR_PH_MASK 0x00030000
#define CR_NBPBL_SHIFT 20
#define SR_BUSY 0x00000010
#define SR_OFNE 0x00000004
#define SR_IFNF BIT(1)
#define SR_OFNE BIT(2)
#define SR_BUSY BIT(8)
#define DMACR_DIEN BIT(0)
#define DMACR_DOEN BIT(1)
#define IMSCR_IN BIT(0)
#define IMSCR_OUT BIT(1)
@ -133,7 +142,15 @@
/* Misc */
#define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32))
#define GCM_CTR_INIT 2
#define CRYP_AUTOSUSPEND_DELAY 50
#define CRYP_AUTOSUSPEND_DELAY 50
#define CRYP_DMA_BURST_REG 4
enum stm32_dma_mode {
NO_DMA,
DMA_PLAIN_SG,
DMA_NEED_SG_TRUNC
};
struct stm32_cryp_caps {
bool aeads_support;
@ -146,6 +163,7 @@ struct stm32_cryp_caps {
u32 sr;
u32 din;
u32 dout;
u32 dmacr;
u32 imsc;
u32 mis;
u32 k1l;
@ -172,6 +190,7 @@ struct stm32_cryp {
struct list_head list;
struct device *dev;
void __iomem *regs;
phys_addr_t phys_base;
struct clk *clk;
unsigned long flags;
u32 irq_status;
@ -190,8 +209,20 @@ struct stm32_cryp {
size_t header_in;
size_t payload_out;
/* DMA process fields */
struct scatterlist *in_sg;
struct scatterlist *header_sg;
struct scatterlist *out_sg;
size_t in_sg_len;
size_t header_sg_len;
size_t out_sg_len;
struct completion dma_completion;
struct dma_chan *dma_lch_in;
struct dma_chan *dma_lch_out;
enum stm32_dma_mode dma_mode;
/* IT process fields */
struct scatter_walk in_walk;
struct scatter_walk out_walk;
@ -291,12 +322,20 @@ static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp)
!(status & CR_CRYPEN), 10, 100000);
}
static inline int stm32_cryp_wait_input(struct stm32_cryp *cryp)
{
u32 status;
return readl_relaxed_poll_timeout_atomic(cryp->regs + cryp->caps->sr, status,
status & SR_IFNF, 1, 10);
}
static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp)
{
u32 status;
return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status,
status & SR_OFNE, 10, 100000);
return readl_relaxed_poll_timeout_atomic(cryp->regs + cryp->caps->sr, status,
status & SR_OFNE, 1, 10);
}
static inline void stm32_cryp_key_read_enable(struct stm32_cryp *cryp)
@ -311,8 +350,13 @@ static inline void stm32_cryp_key_read_disable(struct stm32_cryp *cryp)
cryp->regs + cryp->caps->cr);
}
static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp);
static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp);
static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp);
static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp);
static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err);
static int stm32_cryp_dma_start(struct stm32_cryp *cryp);
static int stm32_cryp_it_start(struct stm32_cryp *cryp);
static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx)
{
@ -813,11 +857,238 @@ static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err)
if (is_gcm(cryp) || is_ccm(cryp))
crypto_finalize_aead_request(cryp->engine, cryp->areq, err);
else
crypto_finalize_skcipher_request(cryp->engine, cryp->req,
err);
crypto_finalize_skcipher_request(cryp->engine, cryp->req, err);
}
static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
static void stm32_cryp_header_dma_callback(void *param)
{
struct stm32_cryp *cryp = (struct stm32_cryp *)param;
int ret;
u32 reg;
dma_unmap_sg(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE);
reg = stm32_cryp_read(cryp, cryp->caps->dmacr);
stm32_cryp_write(cryp, cryp->caps->dmacr, reg & ~(DMACR_DOEN | DMACR_DIEN));
kfree(cryp->header_sg);
reg = stm32_cryp_read(cryp, cryp->caps->cr);
if (cryp->header_in) {
stm32_cryp_write(cryp, cryp->caps->cr, reg | CR_CRYPEN);
ret = stm32_cryp_wait_input(cryp);
if (ret) {
dev_err(cryp->dev, "input header ready timeout after dma\n");
stm32_cryp_finish_req(cryp, ret);
return;
}
stm32_cryp_irq_write_gcmccm_header(cryp);
WARN_ON(cryp->header_in);
}
if (stm32_cryp_get_input_text_len(cryp)) {
/* Phase 3 : payload */
reg = stm32_cryp_read(cryp, cryp->caps->cr);
stm32_cryp_write(cryp, cryp->caps->cr, reg & ~CR_CRYPEN);
reg &= ~CR_PH_MASK;
reg |= CR_PH_PAYLOAD | CR_CRYPEN;
stm32_cryp_write(cryp, cryp->caps->cr, reg);
if (cryp->flags & FLG_IN_OUT_DMA) {
ret = stm32_cryp_dma_start(cryp);
if (ret)
stm32_cryp_finish_req(cryp, ret);
} else {
stm32_cryp_it_start(cryp);
}
} else {
/*
* Phase 4 : tag.
* Nothing to read, nothing to write => end request
*/
stm32_cryp_finish_req(cryp, 0);
}
}
static void stm32_cryp_dma_callback(void *param)
{
struct stm32_cryp *cryp = (struct stm32_cryp *)param;
int ret;
u32 reg;
complete(&cryp->dma_completion); /* completion to indicate no timeout */
dma_sync_sg_for_device(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE);
if (cryp->in_sg != cryp->out_sg)
dma_unmap_sg(cryp->dev, cryp->in_sg, cryp->in_sg_len, DMA_TO_DEVICE);
dma_unmap_sg(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE);
reg = stm32_cryp_read(cryp, cryp->caps->dmacr);
stm32_cryp_write(cryp, cryp->caps->dmacr, reg & ~(DMACR_DOEN | DMACR_DIEN));
reg = stm32_cryp_read(cryp, cryp->caps->cr);
if (is_gcm(cryp) || is_ccm(cryp)) {
kfree(cryp->in_sg);
kfree(cryp->out_sg);
} else {
if (cryp->in_sg != cryp->req->src)
kfree(cryp->in_sg);
if (cryp->out_sg != cryp->req->dst)
kfree(cryp->out_sg);
}
if (cryp->payload_in) {
stm32_cryp_write(cryp, cryp->caps->cr, reg | CR_CRYPEN);
ret = stm32_cryp_wait_input(cryp);
if (ret) {
dev_err(cryp->dev, "input ready timeout after dma\n");
stm32_cryp_finish_req(cryp, ret);
return;
}
stm32_cryp_irq_write_data(cryp);
ret = stm32_cryp_wait_output(cryp);
if (ret) {
dev_err(cryp->dev, "output ready timeout after dma\n");
stm32_cryp_finish_req(cryp, ret);
return;
}
stm32_cryp_irq_read_data(cryp);
}
stm32_cryp_finish_req(cryp, 0);
}
static int stm32_cryp_header_dma_start(struct stm32_cryp *cryp)
{
int ret;
struct dma_async_tx_descriptor *tx_in;
u32 reg;
size_t align_size;
ret = dma_map_sg(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE);
if (!ret) {
dev_err(cryp->dev, "dma_map_sg() error\n");
return -ENOMEM;
}
dma_sync_sg_for_device(cryp->dev, cryp->header_sg, cryp->header_sg_len, DMA_TO_DEVICE);
tx_in = dmaengine_prep_slave_sg(cryp->dma_lch_in, cryp->header_sg, cryp->header_sg_len,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_in) {
dev_err(cryp->dev, "IN prep_slave_sg() failed\n");
return -EINVAL;
}
tx_in->callback_param = cryp;
tx_in->callback = stm32_cryp_header_dma_callback;
/* Advance scatterwalk to not DMA'ed data */
align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize);
scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2);
cryp->header_in -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_in));
if (ret < 0) {
dev_err(cryp->dev, "DMA in submit failed\n");
return ret;
}
dma_async_issue_pending(cryp->dma_lch_in);
reg = stm32_cryp_read(cryp, cryp->caps->dmacr);
stm32_cryp_write(cryp, cryp->caps->dmacr, reg | DMACR_DIEN);
return 0;
}
static int stm32_cryp_dma_start(struct stm32_cryp *cryp)
{
int ret;
size_t align_size;
struct dma_async_tx_descriptor *tx_in, *tx_out;
u32 reg;
if (cryp->in_sg != cryp->out_sg) {
ret = dma_map_sg(cryp->dev, cryp->in_sg, cryp->in_sg_len, DMA_TO_DEVICE);
if (!ret) {
dev_err(cryp->dev, "dma_map_sg() error\n");
return -ENOMEM;
}
}
ret = dma_map_sg(cryp->dev, cryp->out_sg, cryp->out_sg_len, DMA_FROM_DEVICE);
if (!ret) {
dev_err(cryp->dev, "dma_map_sg() error\n");
return -ENOMEM;
}
dma_sync_sg_for_device(cryp->dev, cryp->in_sg, cryp->in_sg_len, DMA_TO_DEVICE);
tx_in = dmaengine_prep_slave_sg(cryp->dma_lch_in, cryp->in_sg, cryp->in_sg_len,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_in) {
dev_err(cryp->dev, "IN prep_slave_sg() failed\n");
return -EINVAL;
}
/* No callback necessary */
tx_in->callback_param = cryp;
tx_in->callback = NULL;
tx_out = dmaengine_prep_slave_sg(cryp->dma_lch_out, cryp->out_sg, cryp->out_sg_len,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!tx_out) {
dev_err(cryp->dev, "OUT prep_slave_sg() failed\n");
return -EINVAL;
}
reinit_completion(&cryp->dma_completion);
tx_out->callback = stm32_cryp_dma_callback;
tx_out->callback_param = cryp;
/* Advance scatterwalk to not DMA'ed data */
align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize);
scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2);
cryp->payload_in -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_in));
if (ret < 0) {
dev_err(cryp->dev, "DMA in submit failed\n");
return ret;
}
dma_async_issue_pending(cryp->dma_lch_in);
/* Advance scatterwalk to not DMA'ed data */
scatterwalk_copychunks(NULL, &cryp->out_walk, align_size, 2);
cryp->payload_out -= align_size;
ret = dma_submit_error(dmaengine_submit(tx_out));
if (ret < 0) {
dev_err(cryp->dev, "DMA out submit failed\n");
return ret;
}
dma_async_issue_pending(cryp->dma_lch_out);
reg = stm32_cryp_read(cryp, cryp->caps->dmacr);
stm32_cryp_write(cryp, cryp->caps->dmacr, reg | DMACR_DOEN | DMACR_DIEN);
if (!wait_for_completion_timeout(&cryp->dma_completion, msecs_to_jiffies(1000))) {
dev_err(cryp->dev, "DMA out timed out\n");
dmaengine_terminate_sync(cryp->dma_lch_out);
return -ETIMEDOUT;
}
return 0;
}
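Both DMA start paths above only queue the block-aligned prefix of the data, ALIGN_DOWN(len, hw_blocksize); whatever remains after that stays in the scatterwalk and is later fed to the peripheral by the interrupt path. A minimal standalone sketch of that split, assuming hypothetical sizes and a userspace re-definition of ALIGN_DOWN rather than the kernel macro:

/*
 * Minimal standalone sketch (not driver code) of how a payload is split
 * into a DMA-able, block-aligned prefix and a CPU-handled tail.
 */
#include <stdio.h>
#include <stddef.h>

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* 'a' must be a power of two */

int main(void)
{
	size_t payload = 100;			/* e.g. GCM plaintext length (made up) */
	size_t block   = 16;			/* AES_BLOCK_SIZE */
	size_t dma_len = ALIGN_DOWN(payload, block);

	printf("DMA transfers %zu bytes, IRQ path finishes the last %zu\n",
	       dma_len, payload - dma_len);	/* 96 and 4 for these numbers */
	return 0;
}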
static int stm32_cryp_it_start(struct stm32_cryp *cryp)
{
/* Enable interrupt and let the IRQ handler do everything */
stm32_cryp_write(cryp, cryp->caps->imsc, IMSCR_IN | IMSCR_OUT);
@ -1149,13 +1420,256 @@ static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req)
return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC);
}
static enum stm32_dma_mode stm32_cryp_dma_check_sg(struct scatterlist *test_sg, size_t len,
size_t block_size)
{
struct scatterlist *sg;
int i;
if (len <= 16)
return NO_DMA; /* PIO is faster for short requests */
for_each_sg(test_sg, sg, sg_nents(test_sg), i) {
if (!IS_ALIGNED(sg->length, block_size) && !sg_is_last(sg))
return NO_DMA;
if (sg->offset % sizeof(u32))
return NO_DMA;
if (sg_is_last(sg) && !IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
return DMA_NEED_SG_TRUNC;
}
return DMA_PLAIN_SG;
}
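stm32_cryp_dma_check_sg() encodes the per-entry DMA eligibility rules: requests of 16 bytes or less stay on the CPU, every entry must start on a 32-bit boundary, only the last entry may have a length that is not a multiple of the block size, and such a trailing partial block forces the list to be truncated before DMA. A standalone model of those rules, assuming plain length/offset pairs and hypothetical names (seg, check) in place of the kernel scatterlist API:

/*
 * Standalone model (not the driver's sg API) of the DMA eligibility
 * rules applied to each scatterlist entry.
 */
#include <stdbool.h>
#include <stdio.h>

enum dma_mode { MODEL_NO_DMA, MODEL_DMA_PLAIN, MODEL_DMA_NEED_TRUNC };

struct seg { unsigned int length, offset; };

static enum dma_mode check(const struct seg *s, int n, unsigned int total,
			   unsigned int block)
{
	if (total <= 16)
		return MODEL_NO_DMA;			/* PIO is faster */

	for (int i = 0; i < n; i++) {
		bool last = (i == n - 1);

		if (s[i].offset % 4)
			return MODEL_NO_DMA;		/* bus needs 32-bit alignment */
		if (!last && s[i].length % block)
			return MODEL_NO_DMA;		/* only the tail may be partial */
		if (last && s[i].length % block)
			return MODEL_DMA_NEED_TRUNC;	/* DMA the aligned prefix only */
	}
	return MODEL_DMA_PLAIN;
}

int main(void)
{
	struct seg sgl[] = { { 64, 0 }, { 20, 0 } };	/* 20-byte unaligned tail */

	printf("mode = %d\n", check(sgl, 2, 84, 16));	/* MODEL_DMA_NEED_TRUNC */
	return 0;
}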
static enum stm32_dma_mode stm32_cryp_dma_check(struct stm32_cryp *cryp, struct scatterlist *in_sg,
struct scatterlist *out_sg)
{
enum stm32_dma_mode ret = DMA_PLAIN_SG;
if (!is_aes(cryp))
return NO_DMA;
if (!cryp->dma_lch_in || !cryp->dma_lch_out)
return NO_DMA;
ret = stm32_cryp_dma_check_sg(in_sg, cryp->payload_in, AES_BLOCK_SIZE);
if (ret == NO_DMA)
return ret;
ret = stm32_cryp_dma_check_sg(out_sg, cryp->payload_out, AES_BLOCK_SIZE);
if (ret == NO_DMA)
return ret;
/* Check CTR counter overflow */
if (is_aes(cryp) && is_ctr(cryp)) {
u32 c;
__be32 iv3;
memcpy(&iv3, &cryp->req->iv[3 * sizeof(u32)], sizeof(iv3));
c = be32_to_cpu(iv3);
if ((c + cryp->payload_in) < cryp->payload_in)
return NO_DMA;
}
/* Workaround: do not use DMA when CTR mode would need a truncated sg list */
if (is_aes(cryp) && is_ctr(cryp) && ret == DMA_NEED_SG_TRUNC)
return NO_DMA;
return ret;
}
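The CTR check above reads the last 32-bit word of the IV (the running block counter) and refuses DMA when adding the payload length to it wraps around in 32-bit arithmetic, using the classic unsigned-overflow idiom (a + b) < b; since payload_in is a byte count, at least as large as the number of blocks, the test appears to err on the safe side. A standalone sketch of the idiom, with made-up values:

/*
 * Standalone sketch of the unsigned-wrap test used for the CTR counter.
 * Plain values are used instead of the request/IV structures.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool ctr_would_wrap(uint32_t counter, uint32_t payload)
{
	/* (a + b) < b  iff  the 32-bit addition wrapped around */
	return (uint32_t)(counter + payload) < payload;
}

int main(void)
{
	printf("%d\n", ctr_would_wrap(0xfffffff0u, 0x100));	/* 1: fall back to PIO */
	printf("%d\n", ctr_would_wrap(0x00000010u, 0x100));	/* 0: DMA is fine */
	return 0;
}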
static int stm32_cryp_truncate_sg(struct scatterlist **new_sg, size_t *new_sg_len,
struct scatterlist *sg, off_t skip, size_t size)
{
struct scatterlist *cur;
int alloc_sg_len;
*new_sg_len = 0;
if (!sg || !size) {
*new_sg = NULL;
return 0;
}
alloc_sg_len = sg_nents_for_len(sg, skip + size);
if (alloc_sg_len < 0)
return alloc_sg_len;
/* We may allocate more sg entries than needed, but it is easier */
*new_sg = kmalloc_array((size_t)alloc_sg_len, sizeof(struct scatterlist), GFP_KERNEL);
if (!*new_sg)
return -ENOMEM;
sg_init_table(*new_sg, (unsigned int)alloc_sg_len);
cur = *new_sg;
while (sg && size) {
unsigned int len = sg->length;
unsigned int offset = sg->offset;
if (skip > len) {
skip -= len;
sg = sg_next(sg);
continue;
}
if (skip) {
len -= skip;
offset += skip;
skip = 0;
}
if (size < len)
len = size;
if (len > 0) {
(*new_sg_len)++;
size -= len;
sg_set_page(cur, sg_page(sg), len, offset);
if (size == 0)
sg_mark_end(cur);
cur = sg_next(cur);
}
sg = sg_next(sg);
}
return 0;
}
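stm32_cryp_truncate_sg() above builds a fresh scatterlist covering only the [skip, skip + size) window of the original one, so the DMA engine never sees the associated data that was already fed by the CPU, nor the unaligned tail. A standalone model of the windowing arithmetic, assuming plain offset/length pairs and hypothetical names (seg, window) instead of the scatterlist helpers:

/*
 * Standalone model of the [skip, skip + size) windowing. No page
 * handling; segments are plain offset/length pairs.
 */
#include <stdio.h>
#include <stddef.h>

struct seg { size_t offset, length; };

static int window(const struct seg *in, int n, size_t skip, size_t size,
		  struct seg *out)
{
	int m = 0;

	for (int i = 0; i < n && size; i++) {
		size_t off = in[i].offset, len = in[i].length;

		if (skip >= len) {		/* whole segment before the window */
			skip -= len;
			continue;
		}
		off += skip;			/* partial skip inside this segment */
		len -= skip;
		skip = 0;

		if (len > size)			/* clamp to the window end */
			len = size;

		out[m].offset = off;
		out[m].length = len;
		m++;
		size -= len;
	}
	return m;				/* number of segments produced */
}

int main(void)
{
	struct seg in[] = { { 0, 32 }, { 0, 32 }, { 0, 32 } }, out[3];
	int m = window(in, 3, 40, 48, out);	/* skip 40 bytes, take 48 */

	for (int i = 0; i < m; i++)		/* prints {8,24} and {0,24} */
		printf("seg %d: offset %zu len %zu\n", i, out[i].offset, out[i].length);
	return 0;
}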
static int stm32_cryp_cipher_prepare(struct stm32_cryp *cryp, struct scatterlist *in_sg,
struct scatterlist *out_sg)
{
size_t align_size;
int ret;
cryp->dma_mode = stm32_cryp_dma_check(cryp, in_sg, out_sg);
scatterwalk_start(&cryp->in_walk, in_sg);
scatterwalk_start(&cryp->out_walk, out_sg);
if (cryp->dma_mode == NO_DMA) {
cryp->flags &= ~FLG_IN_OUT_DMA;
if (is_ctr(cryp))
memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr));
} else if (cryp->dma_mode == DMA_NEED_SG_TRUNC) {
cryp->flags |= FLG_IN_OUT_DMA;
align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize);
ret = stm32_cryp_truncate_sg(&cryp->in_sg, &cryp->in_sg_len, in_sg, 0, align_size);
if (ret)
return ret;
ret = stm32_cryp_truncate_sg(&cryp->out_sg, &cryp->out_sg_len, out_sg, 0,
align_size);
if (ret) {
kfree(cryp->in_sg);
return ret;
}
} else {
cryp->flags |= FLG_IN_OUT_DMA;
cryp->in_sg = in_sg;
cryp->out_sg = out_sg;
ret = sg_nents_for_len(cryp->in_sg, cryp->payload_in);
if (ret < 0)
return ret;
cryp->in_sg_len = (size_t)ret;
ret = sg_nents_for_len(out_sg, cryp->payload_out);
if (ret < 0)
return ret;
cryp->out_sg_len = (size_t)ret;
}
return 0;
}
static int stm32_cryp_aead_prepare(struct stm32_cryp *cryp, struct scatterlist *in_sg,
struct scatterlist *out_sg)
{
size_t align_size;
off_t skip;
int ret, ret2;
cryp->header_sg = NULL;
cryp->in_sg = NULL;
cryp->out_sg = NULL;
if (!cryp->dma_lch_in || !cryp->dma_lch_out) {
cryp->dma_mode = NO_DMA;
cryp->flags &= ~(FLG_IN_OUT_DMA | FLG_HEADER_DMA);
return 0;
}
/* CCM hw_init may already have consumed part of the header */
skip = cryp->areq->assoclen - cryp->header_in;
align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize);
ret = stm32_cryp_truncate_sg(&cryp->header_sg, &cryp->header_sg_len, in_sg, skip,
align_size);
if (ret)
return ret;
ret = stm32_cryp_dma_check_sg(cryp->header_sg, align_size, AES_BLOCK_SIZE);
if (ret == NO_DMA) {
/* We cannot DMA the header */
kfree(cryp->header_sg);
cryp->header_sg = NULL;
cryp->flags &= ~FLG_HEADER_DMA;
} else {
cryp->flags |= FLG_HEADER_DMA;
}
/* Now skip the whole header so we are at the start of the payload */
skip = cryp->areq->assoclen;
align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize);
ret = stm32_cryp_truncate_sg(&cryp->in_sg, &cryp->in_sg_len, in_sg, skip, align_size);
if (ret) {
kfree(cryp->header_sg);
return ret;
}
/* For the output buffer, align_size is the same as for the input buffer */
ret = stm32_cryp_truncate_sg(&cryp->out_sg, &cryp->out_sg_len, out_sg, skip, align_size);
if (ret) {
kfree(cryp->header_sg);
kfree(cryp->in_sg);
return ret;
}
ret = stm32_cryp_dma_check_sg(cryp->in_sg, align_size, AES_BLOCK_SIZE);
ret2 = stm32_cryp_dma_check_sg(cryp->out_sg, align_size, AES_BLOCK_SIZE);
if (ret == NO_DMA || ret2 == NO_DMA) {
kfree(cryp->in_sg);
cryp->in_sg = NULL;
kfree(cryp->out_sg);
cryp->out_sg = NULL;
cryp->flags &= ~FLG_IN_OUT_DMA;
} else {
cryp->flags |= FLG_IN_OUT_DMA;
}
return 0;
}
static int stm32_cryp_prepare_req(struct skcipher_request *req,
struct aead_request *areq)
{
struct stm32_cryp_ctx *ctx;
struct stm32_cryp *cryp;
struct stm32_cryp_reqctx *rctx;
struct scatterlist *in_sg;
struct scatterlist *in_sg, *out_sg;
int ret;
if (!req && !areq)
@ -1169,8 +1683,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
rctx->mode &= FLG_MODE_MASK;
ctx->cryp = cryp;
cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode;
cryp->hw_blocksize = is_aes(cryp) ? AES_BLOCK_SIZE : DES_BLOCK_SIZE;
cryp->ctx = ctx;
@ -1182,6 +1694,15 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
cryp->payload_in = req->cryptlen;
cryp->payload_out = req->cryptlen;
cryp->authsize = 0;
in_sg = req->src;
out_sg = req->dst;
ret = stm32_cryp_cipher_prepare(cryp, in_sg, out_sg);
if (ret)
return ret;
ret = stm32_cryp_hw_init(cryp);
} else {
/*
* Length of input and output data:
@ -1211,23 +1732,22 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
cryp->header_in = areq->assoclen;
cryp->payload_out = cryp->payload_in;
}
}
in_sg = req ? req->src : areq->src;
scatterwalk_start(&cryp->in_walk, in_sg);
in_sg = areq->src;
out_sg = areq->dst;
cryp->out_sg = req ? req->dst : areq->dst;
scatterwalk_start(&cryp->out_walk, cryp->out_sg);
if (is_gcm(cryp) || is_ccm(cryp)) {
scatterwalk_start(&cryp->in_walk, in_sg);
scatterwalk_start(&cryp->out_walk, out_sg);
/* In output, jump after assoc data */
scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2);
ret = stm32_cryp_hw_init(cryp);
if (ret)
return ret;
ret = stm32_cryp_aead_prepare(cryp, in_sg, out_sg);
}
if (is_ctr(cryp))
memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr));
ret = stm32_cryp_hw_init(cryp);
return ret;
}
@ -1239,12 +1759,24 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
struct stm32_cryp *cryp = ctx->cryp;
int ret;
if (!cryp)
return -ENODEV;
return stm32_cryp_prepare_req(req, NULL) ?:
stm32_cryp_cpu_start(cryp);
ret = stm32_cryp_prepare_req(req, NULL);
if (ret)
return ret;
if (cryp->flags & FLG_IN_OUT_DMA)
ret = stm32_cryp_dma_start(cryp);
else
ret = stm32_cryp_it_start(cryp);
if (ret == -ETIMEDOUT)
stm32_cryp_finish_req(cryp, ret);
return ret;
}
static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
@ -1262,13 +1794,20 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
if (err)
return err;
if (unlikely(!cryp->payload_in && !cryp->header_in)) {
if (!stm32_cryp_get_input_text_len(cryp) && !cryp->header_in &&
!(cryp->flags & FLG_HEADER_DMA)) {
/* No input data to process: get tag and finish */
stm32_cryp_finish_req(cryp, 0);
return 0;
}
return stm32_cryp_cpu_start(cryp);
if (cryp->flags & FLG_HEADER_DMA)
return stm32_cryp_header_dma_start(cryp);
if (!cryp->header_in && cryp->flags & FLG_IN_OUT_DMA)
return stm32_cryp_dma_start(cryp);
return stm32_cryp_it_start(cryp);
}
static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
@ -1665,8 +2204,11 @@ static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg)
it_mask &= ~IMSCR_OUT;
stm32_cryp_write(cryp, cryp->caps->imsc, it_mask);
if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out)
if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) {
local_bh_disable();
stm32_cryp_finish_req(cryp, 0);
local_bh_enable();
}
return IRQ_HANDLED;
}
@ -1680,13 +2222,72 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
return IRQ_WAKE_THREAD;
}
static int stm32_cryp_dma_init(struct stm32_cryp *cryp)
{
struct dma_slave_config dma_conf;
struct dma_chan *chan;
int ret;
memset(&dma_conf, 0, sizeof(dma_conf));
dma_conf.direction = DMA_MEM_TO_DEV;
dma_conf.dst_addr = cryp->phys_base + cryp->caps->din;
dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_conf.dst_maxburst = CRYP_DMA_BURST_REG;
dma_conf.device_fc = false;
chan = dma_request_chan(cryp->dev, "in");
if (IS_ERR(chan))
return PTR_ERR(chan);
cryp->dma_lch_in = chan;
ret = dmaengine_slave_config(cryp->dma_lch_in, &dma_conf);
if (ret) {
dma_release_channel(cryp->dma_lch_in);
cryp->dma_lch_in = NULL;
dev_err(cryp->dev, "Couldn't configure DMA in slave.\n");
return ret;
}
memset(&dma_conf, 0, sizeof(dma_conf));
dma_conf.direction = DMA_DEV_TO_MEM;
dma_conf.src_addr = cryp->phys_base + cryp->caps->dout;
dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dma_conf.src_maxburst = CRYP_DMA_BURST_REG;
dma_conf.device_fc = false;
chan = dma_request_chan(cryp->dev, "out");
if (IS_ERR(chan)) {
dma_release_channel(cryp->dma_lch_in);
cryp->dma_lch_in = NULL;
return PTR_ERR(chan);
}
cryp->dma_lch_out = chan;
ret = dmaengine_slave_config(cryp->dma_lch_out, &dma_conf);
if (ret) {
dma_release_channel(cryp->dma_lch_out);
cryp->dma_lch_out = NULL;
dev_err(cryp->dev, "Couldn't configure DMA out slave.\n");
dma_release_channel(cryp->dma_lch_in);
cryp->dma_lch_in = NULL;
return ret;
}
init_completion(&cryp->dma_completion);
return 0;
}
static struct skcipher_engine_alg crypto_algs[] = {
{
.base = {
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "stm32-ecb-aes",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.base.cra_alignmask = 0,
@ -1707,8 +2308,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
.base = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "stm32-cbc-aes",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.base.cra_alignmask = 0,
@ -1730,8 +2331,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
.base = {
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "stm32-ctr-aes",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.base.cra_alignmask = 0,
@ -1753,8 +2354,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
.base = {
.base.cra_name = "ecb(des)",
.base.cra_driver_name = "stm32-ecb-des",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.base.cra_alignmask = 0,
@ -1775,8 +2376,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
.base = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "stm32-cbc-des",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.base.cra_alignmask = 0,
@ -1798,8 +2399,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
.base = {
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "stm32-ecb-des3",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.base.cra_alignmask = 0,
@ -1820,8 +2421,8 @@ static struct skcipher_engine_alg crypto_algs[] = {
.base = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "stm32-cbc-des3",
.base.cra_priority = 200,
.base.cra_flags = CRYPTO_ALG_ASYNC,
.base.cra_priority = 300,
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = DES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.base.cra_alignmask = 0,
@ -1854,8 +2455,8 @@ static struct aead_engine_alg aead_algs[] = {
.base.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "stm32-gcm-aes",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.cra_alignmask = 0,
@ -1877,8 +2478,8 @@ static struct aead_engine_alg aead_algs[] = {
.base.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "stm32-ccm-aes",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct stm32_cryp_ctx),
.cra_alignmask = 0,
@ -1901,6 +2502,7 @@ static const struct stm32_cryp_caps ux500_data = {
.sr = UX500_CRYP_SR,
.din = UX500_CRYP_DIN,
.dout = UX500_CRYP_DOUT,
.dmacr = UX500_CRYP_DMACR,
.imsc = UX500_CRYP_IMSC,
.mis = UX500_CRYP_MIS,
.k1l = UX500_CRYP_K1L,
@ -1923,6 +2525,7 @@ static const struct stm32_cryp_caps f7_data = {
.sr = CRYP_SR,
.din = CRYP_DIN,
.dout = CRYP_DOUT,
.dmacr = CRYP_DMACR,
.imsc = CRYP_IMSCR,
.mis = CRYP_MISR,
.k1l = CRYP_K1LR,
@ -1945,6 +2548,7 @@ static const struct stm32_cryp_caps mp1_data = {
.sr = CRYP_SR,
.din = CRYP_DIN,
.dout = CRYP_DOUT,
.dmacr = CRYP_DMACR,
.imsc = CRYP_IMSCR,
.mis = CRYP_MISR,
.k1l = CRYP_K1LR,
@ -1985,6 +2589,8 @@ static int stm32_cryp_probe(struct platform_device *pdev)
if (IS_ERR(cryp->regs))
return PTR_ERR(cryp->regs);
cryp->phys_base = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@ -2030,6 +2636,17 @@ static int stm32_cryp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cryp);
ret = stm32_cryp_dma_init(cryp);
switch (ret) {
case 0:
break;
case -ENODEV:
dev_dbg(dev, "DMA mode not available\n");
break;
default:
goto err_dma;
}
spin_lock(&cryp_list.lock);
list_add(&cryp->list, &cryp_list.dev_list);
spin_unlock(&cryp_list.lock);
@ -2075,6 +2692,12 @@ static int stm32_cryp_probe(struct platform_device *pdev)
spin_lock(&cryp_list.lock);
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
if (cryp->dma_lch_in)
dma_release_channel(cryp->dma_lch_in);
if (cryp->dma_lch_out)
dma_release_channel(cryp->dma_lch_out);
err_dma:
err_rst:
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
@ -2101,6 +2724,12 @@ static void stm32_cryp_remove(struct platform_device *pdev)
list_del(&cryp->list);
spin_unlock(&cryp_list.lock);
if (cryp->dma_lch_in)
dma_release_channel(cryp->dma_lch_in);
if (cryp->dma_lch_out)
dma_release_channel(cryp->dma_lch_out);
pm_runtime_disable(cryp->dev);
pm_runtime_put_noidle(cryp->dev);

View file

@ -326,7 +326,6 @@ static void tegra_se_remove(struct platform_device *pdev)
crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
iommu_fwspec_free(se->dev);
host1x_client_unregister(&se->client);
}

View file

@ -446,4 +446,5 @@ static struct platform_driver zynqmp_aes_driver = {
};
module_platform_driver(zynqmp_aes_driver);
MODULE_DESCRIPTION("Xilinx ZynqMP AES Driver");
MODULE_LICENSE("GPL");

View file

@ -63,6 +63,9 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit
* @nbytes: Size of input byte array
* @out: Output digits array
* @ndigits: Number of digits to create from byte array
*
* The first byte in the input byte array is expected to hold the most
* significant bits of the large integer.
*/
void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
u64 *out, unsigned int ndigits);
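The added sentence pins down the byte order: the input is big-endian (most significant byte first) and is expanded into ndigits 64-bit digits stored least significant digit first, as elsewhere in the ECC library, left-padded with zeroes when nbytes is smaller than ndigits * 8. A standalone sketch of that conversion, given as an illustration of the convention rather than the kernel implementation:

/*
 * Standalone sketch of the big-endian byte string -> u64 digit
 * conversion described above (illustration only).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void digits_from_bytes(const uint8_t *in, unsigned int nbytes,
			      uint64_t *out, unsigned int ndigits)
{
	memset(out, 0, ndigits * sizeof(*out));

	for (unsigned int i = 0; i < nbytes; i++) {
		/* byte 0 holds the most significant bits of the integer */
		unsigned int bit = (nbytes - 1 - i) * 8;

		if (bit / 64 < ndigits)
			out[bit / 64] |= (uint64_t)in[i] << (bit % 64);
	}
}

int main(void)
{
	const uint8_t be[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09 };
	uint64_t d[2];

	digits_from_bytes(be, sizeof(be), d, 2);
	printf("d[1]=%#llx d[0]=%#llx\n",
	       (unsigned long long)d[1], (unsigned long long)d[0]);
	/* expected: d[1]=0x1 d[0]=0x203040506070809 */
	return 0;
}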

Some files were not shown because too many files have changed in this diff.