Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:

   - crypto_destroy_tfm now ignores errors as well as NULL pointers

  Algorithms:

   - Add explicit curve IDs in ECDH algorithm names

   - Add NIST P384 curve parameters

   - Add ECDSA

  Drivers:

   - Add support for Green Sardine in ccp

   - Add ecdh/curve25519 to hisilicon/hpre

   - Add support for AM64 in sa2ul"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (184 commits)
  fsverity: relax build time dependency on CRYPTO_SHA256
  fscrypt: relax Kconfig dependencies for crypto API algorithms
  crypto: camellia - drop duplicate "depends on CRYPTO"
  crypto: s5p-sss - consistently use local 'dev' variable in probe()
  crypto: s5p-sss - remove unneeded local variable initialization
  crypto: s5p-sss - simplify getting of_device_id match data
  ccp: ccp - add support for Green Sardine
  crypto: ccp - Make ccp_dev_suspend and ccp_dev_resume void functions
  crypto: octeontx2 - add support for OcteonTX2 98xx CPT block.
  crypto: chelsio/chcr - Remove useless MODULE_VERSION
  crypto: ux500/cryp - Remove duplicate argument
  crypto: chelsio - remove unused function
  crypto: sa2ul - Add support for AM64
  crypto: sa2ul - Support for per channel coherency
  dt-bindings: crypto: ti,sa2ul: Add new compatible for AM64
  crypto: hisilicon - enable new error types for QM
  crypto: hisilicon - add new error type for SEC
  crypto: hisilicon - support new error types for ZIP
  crypto: hisilicon - dynamic configuration 'err_info'
  crypto: doc - fix kernel-doc notation in chacha.c and af_alg.c
  ...
Merged by: Linus Torvalds, 2021-04-26 08:51:23 -07:00
commit a4a78bc8ea
209 changed files with 4598 additions and 2026 deletions
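For context (not part of this pull's diffs): a minimal sketch of what "explicit curve IDs in ECDH algorithm names" means for users of the KPP API. The curve is now selected by the transform name rather than by a curve_id field in the packed ECDH parameters; the name "ecdh-nist-p256" below reflects the new convention.

#include <crypto/kpp.h>

/* Hypothetical demo: allocate an ECDH transform by its curve-qualified
 * name. Previously this was crypto_alloc_kpp("ecdh", 0, 0) plus a
 * curve_id field in the struct ecdh parameters. */
static int ecdh_name_demo(void)
{
        struct crypto_kpp *tfm;

        tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        crypto_free_kpp(tfm);
        return 0;
}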

@@ -14,6 +14,7 @@ properties:
     enum:
       - ti,j721e-sa2ul
       - ti,am654-sa2ul
+      - ti,am64-sa2ul
 
   reg:
     maxItems: 1
@@ -45,6 +46,18 @@ properties:
     description:
       Address translation for the possible RNG child node for SA2UL
 
+  clocks:
+    items:
+      - description: Clock used by PKA
+      - description: Main Input Clock
+      - description: Clock used by rng
+
+  clock-names:
+    items:
+      - const: pka_in_clk
+      - const: x1_clk
+      - const: x2_clk
+
 patternProperties:
   "^rng@[a-f0-9]+$":
     type: object
@@ -57,7 +70,16 @@ required:
   - power-domains
   - dmas
   - dma-names
-  - dma-coherent
+
+if:
+  properties:
+    compatible:
+      enum:
+        - ti,j721e-sa2ul
+        - ti,am654-sa2ul
+then:
+  required:
+    - dma-coherent
 
 additionalProperties: false

@@ -28,6 +28,12 @@ properties:
   clock-names:
     const: ipsec
 
+  resets:
+    maxItems: 1
+
+  reset-names:
+    const: ipsec
+
   interrupts:
     maxItems: 1
@@ -35,6 +41,18 @@ required:
   - compatible
   - reg
 
+if:
+  properties:
+    compatible:
+      enum:
+        - brcm,bcm6368-rng
+then:
+  required:
+    - clocks
+    - clock-names
+    - resets
+    - reset-names
+
 additionalProperties: false
 
 examples:
@@ -58,4 +76,7 @@ examples:
         clocks = <&periph_clk 18>;
         clock-names = "ipsec";
+
+        resets = <&periph_rst 4>;
+        reset-names = "ipsec";
     };

@@ -99,28 +99,6 @@
         __hround        \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr
         .endm
 
-        .macro          __rev, out, in
-        .if             __LINUX_ARM_ARCH__ < 6
-        lsl             t0, \in, #24
-        and             t1, \in, #0xff00
-        and             t2, \in, #0xff0000
-        orr             \out, t0, \in, lsr #24
-        orr             \out, \out, t1, lsl #8
-        orr             \out, \out, t2, lsr #8
-        .else
-        rev             \out, \in
-        .endif
-        .endm
-
-        .macro          __adrl, out, sym, c
-        .if             __LINUX_ARM_ARCH__ < 7
-        ldr\c           \out, =\sym
-        .else
-        movw\c          \out, #:lower16:\sym
-        movt\c          \out, #:upper16:\sym
-        .endif
-        .endm
-
         .macro          do_crypt, round, ttab, ltab, bsz
         push            {r3-r11, lr}
@@ -133,10 +111,10 @@
         ldr             r7, [in, #12]
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-        __rev           r4, r4
-        __rev           r5, r5
-        __rev           r6, r6
-        __rev           r7, r7
+        rev_l           r4, t0
+        rev_l           r5, t0
+        rev_l           r6, t0
+        rev_l           r7, t0
 #endif
 
         eor             r4, r4, r8
@@ -144,7 +122,7 @@
         eor             r6, r6, r10
         eor             r7, r7, r11
 
-        __adrl          ttab, \ttab
+        mov_l           ttab, \ttab
 
         /*
          * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into
          * L1 cache, assuming cacheline size >= 32.  This is a hardening measure
@@ -180,7 +158,7 @@
 2:      .ifb            \ltab
         add             ttab, ttab, #1
         .else
-        __adrl          ttab, \ltab
+        mov_l           ttab, \ltab
         // Prefetch inverse S-box for final round; see explanation above
         .set            i, 0
         .rept           256 / 64
@@ -194,10 +172,10 @@
         \round          r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-        __rev           r4, r4
-        __rev           r5, r5
-        __rev           r6, r6
-        __rev           r7, r7
+        rev_l           r4, t0
+        rev_l           r5, t0
+        rev_l           r6, t0
+        rev_l           r7, t0
 #endif
 
         ldr             out, [sp]
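The local __rev and __adrl helpers deleted above are superseded by rev_l and mov_l, the equivalent generic macros provided by <asm/assembler.h> (note the blake2s change below gaining exactly that include). This drops the per-file copies of the pre-ARMv6 byte-swap and pre-ARMv7 address-load fallbacks in favor of one shared implementation.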

@@ -85,8 +85,8 @@ static int __init blake2b_neon_mod_init(void)
 
 static void __exit blake2b_neon_mod_exit(void)
 {
-        return crypto_unregister_shashes(blake2b_neon_algs,
-                                         ARRAY_SIZE(blake2b_neon_algs));
+        crypto_unregister_shashes(blake2b_neon_algs,
+                                  ARRAY_SIZE(blake2b_neon_algs));
 }
 
 module_init(blake2b_neon_mod_init);

@@ -8,6 +8,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 // Registers used to hold message words temporarily. There aren't
 // enough ARM registers to hold the whole message block, so we have to
@@ -38,6 +39,23 @@
 #endif
 .endm
 
+.macro _le32_bswap      a, tmp
+#ifdef __ARMEB__
+        rev_l           \a, \tmp
+#endif
+.endm
+
+.macro _le32_bswap_8x   a, b, c, d, e, f, g, h,  tmp
+        _le32_bswap     \a, \tmp
+        _le32_bswap     \b, \tmp
+        _le32_bswap     \c, \tmp
+        _le32_bswap     \d, \tmp
+        _le32_bswap     \e, \tmp
+        _le32_bswap     \f, \tmp
+        _le32_bswap     \g, \tmp
+        _le32_bswap     \h, \tmp
+.endm
+
 // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
 // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
 // columns/diagonals. s0-s1 are the word offsets to the message words the first
@@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
         tst             r1, #3
         bne             .Lcopy_block_misaligned
         ldmia           r1!, {r2-r9}
+        _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
         stmia           r12!, {r2-r9}
         ldmia           r1!, {r2-r9}
+        _le32_bswap_8x  r2, r3, r4, r5, r6, r7, r8, r9,  r14
         stmia           r12, {r2-r9}
 .Lcopy_block_done:
         str             r1, [sp, #68]           // Update message pointer
@@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
 1:
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
         ldr             r3, [r1], #4
+        _le32_bswap     r3, r4
 #else
         ldrb            r3, [r1, #0]
         ldrb            r4, [r1, #1]

@@ -41,32 +41,15 @@
         X14     .req    r12
         X15     .req    r14
 
-.macro __rev            out, in,  t0, t1, t2
-.if __LINUX_ARM_ARCH__ >= 6
-        rev             \out, \in
-.else
-        lsl             \t0, \in, #24
-        and             \t1, \in, #0xff00
-        and             \t2, \in, #0xff0000
-        orr             \out, \t0, \in, lsr #24
-        orr             \out, \out, \t1, lsl #8
-        orr             \out, \out, \t2, lsr #8
-.endif
-.endm
-
-.macro _le32_bswap      x,  t0, t1, t2
+.macro _le32_bswap_4x   a, b, c, d,  tmp
 #ifdef __ARMEB__
-        __rev           \x, \x,  \t0, \t1, \t2
+        rev_l           \a,  \tmp
+        rev_l           \b,  \tmp
+        rev_l           \c,  \tmp
+        rev_l           \d,  \tmp
 #endif
 .endm
 
-.macro _le32_bswap_4x   a, b, c, d,  t0, t1, t2
-        _le32_bswap     \a,  \t0, \t1, \t2
-        _le32_bswap     \b,  \t0, \t1, \t2
-        _le32_bswap     \c,  \t0, \t1, \t2
-        _le32_bswap     \d,  \t0, \t1, \t2
-.endm
-
 .macro __ldrd           a, b,  src,  offset
 #if __LINUX_ARM_ARCH__ >= 6
         ldrd            \a, \b, [\src, #\offset]
@@ -200,7 +183,7 @@
         add             X1, X1, r9
         add             X2, X2, r10
         add             X3, X3, r11
-        _le32_bswap_4x  X0, X1, X2, X3,  r8, r9, r10
+        _le32_bswap_4x  X0, X1, X2, X3,  r8
         ldmia           r12!, {r8-r11}
         eor             X0, X0, r8
         eor             X1, X1, r9
@@ -216,7 +199,7 @@
         ldmia           r12!, {X0-X3}
         add             X6, r10, X6, ror #brot
         add             X7, r11, X7, ror #brot
-        _le32_bswap_4x  X4, X5, X6, X7,  r8, r9, r10
+        _le32_bswap_4x  X4, X5, X6, X7,  r8
         eor             X4, X4, X0
         eor             X5, X5, X1
         eor             X6, X6, X2
@@ -231,7 +214,7 @@
         add             r1, r1, r9              // x9
         add             r6, r6, r10             // x10
         add             r7, r7, r11             // x11
-        _le32_bswap_4x  r0, r1, r6, r7,  r8, r9, r10
+        _le32_bswap_4x  r0, r1, r6, r7,  r8
         ldmia           r12!, {r8-r11}
         eor             r0, r0, r8              // x8
         eor             r1, r1, r9              // x9
@@ -245,7 +228,7 @@
         add             r3, r9, r3, ror #drot   // x13
         add             r4, r10, r4, ror #drot  // x14
         add             r5, r11, r5, ror #drot  // x15
-        _le32_bswap_4x  r2, r3, r4, r5,  r9, r10, r11
+        _le32_bswap_4x  r2, r3, r4, r5,  r9
         ldr             r9, [sp, #72]           // load LEN
         eor             r2, r2, r0              // x12
         eor             r3, r3, r1              // x13
@@ -301,7 +284,7 @@
         add             X1, X1, r9
         add             X2, X2, r10
         add             X3, X3, r11
-        _le32_bswap_4x  X0, X1, X2, X3,  r8, r9, r10
+        _le32_bswap_4x  X0, X1, X2, X3,  r8
         stmia           r14!, {X0-X3}
 
         // Save keystream for x4-x7
@@ -311,7 +294,7 @@
         add             X5, r9, X5, ror #brot
         add             X6, r10, X6, ror #brot
         add             X7, r11, X7, ror #brot
-        _le32_bswap_4x  X4, X5, X6, X7,  r8, r9, r10
+        _le32_bswap_4x  X4, X5, X6, X7,  r8
         add             r8, sp, #64
         stmia           r14!, {X4-X7}
@@ -323,7 +306,7 @@
         add             r1, r1, r9              // x9
         add             r6, r6, r10             // x10
         add             r7, r7, r11             // x11
-        _le32_bswap_4x  r0, r1, r6, r7,  r8, r9, r10
+        _le32_bswap_4x  r0, r1, r6, r7,  r8
         stmia           r14!, {r0,r1,r6,r7}
         __ldrd          r8, r9, sp, 144
         __ldrd          r10, r11, sp, 152
@@ -331,7 +314,7 @@
         add             r3, r9, r3, ror #drot   // x13
         add             r4, r10, r4, ror #drot  // x14
         add             r5, r11, r5, ror #drot  // x15
-        _le32_bswap_4x  r2, r3, r4, r5,  r9, r10, r11
+        _le32_bswap_4x  r2, r3, r4, r5,  r9
         stmia           r14, {r2-r5}
 
         // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN

@@ -10,8 +10,8 @@
 #include <linux/linkage.h>
 
 .text
-.fpu            neon
 .arch           armv7-a
+.fpu            neon
 .align          4
 
 ENTRY(curve25519_neon)
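The ordering matters because a '.arch' directive can reset the assembler's FPU selection; with '.fpu neon' emitted first, some assemblers then reject the NEON instructions in this file, so the '.fpu' directive has to come after '.arch'.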

@@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
         poly1305_init_arm(&dctx->h, key);
         dctx->s[0] = get_unaligned_le32(key + 16);

@@ -359,6 +359,7 @@ ST5(    mov             v4.16b, vctr.16b        )
         ins             vctr.d[0], x8
 
         /* apply carry to N counter blocks for N := x12 */
+        cbz             x12, 2f
         adr             x16, 1f
         sub             x16, x16, x12, lsl #3
         br              x16

@@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
 
 static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
 
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
         poly1305_init_arm64(&dctx->h, key);
         dctx->s[0] = get_unaligned_le32(key + 16);

@@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
 asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
 asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
 
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
         poly1305_init_mips(&dctx->h, key);
         dctx->s[0] = get_unaligned_le32(key + 16);

@@ -107,7 +107,7 @@ static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
 
                 src += bytes;
                 len -= bytes;
-        };
+        }
 
         memcpy((char *)sctx->buffer, src, len);
 
         return 0;

@@ -16,7 +16,7 @@
 #include <asm/simd.h>
 
 asmlinkage void poly1305_init_x86_64(void *ctx,
-                                     const u8 key[POLY1305_KEY_SIZE]);
+                                     const u8 key[POLY1305_BLOCK_SIZE]);
 asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
                                        const size_t len, const u32 padbit);
 asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
@@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
         state->is_base2_26 = 0;
 }
 
-static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
+static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
 {
         poly1305_init_x86_64(ctx, key);
 }
@@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
                 poly1305_emit_avx(ctx, mac, nonce);
 }
 
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
 {
         poly1305_simd_init(&dctx->h, key);
         dctx->s[0] = get_unaligned_le32(&key[16]);

@@ -242,6 +242,16 @@ config CRYPTO_ECDH
         help
           Generic implementation of the ECDH algorithm
 
+config CRYPTO_ECDSA
+        tristate "ECDSA (NIST P192, P256 etc.) algorithm"
+        select CRYPTO_ECC
+        select CRYPTO_AKCIPHER
+        select ASN1
+        help
+          Elliptic Curve Digital Signature Algorithm (NIST P192, P256 etc.)
+          is a NIST cryptographic standard algorithm. Only signature
+          verification is implemented.
+
 config CRYPTO_ECRDSA
         tristate "EC-RDSA (GOST 34.10) algorithm"
         select CRYPTO_ECC
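The new option registers akcipher transforms whose names carry the curve, e.g. "ecdsa-nist-p256" (the same names the X.509 parser change further down stores in pkey_algo). A minimal sketch of exercising verification, assuming an X9.62/DER-encoded signature and an uncompressed public key; all buffer contents and the function name are placeholders, not real test vectors:

#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Hypothetical demo: verify an ECDSA P-256 signature over a SHA-256
 * digest via the akcipher API. */
static int ecdsa_verify_demo(const u8 *pub_key, unsigned int key_len,
                             const u8 *sig, unsigned int sig_len,
                             const u8 *digest, unsigned int digest_len)
{
        struct scatterlist src_sg[2];
        struct akcipher_request *req;
        struct crypto_akcipher *tfm;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_akcipher("ecdsa-nist-p256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Key is expected as an uncompressed point: 0x04 || x || y */
        err = crypto_akcipher_set_pub_key(tfm, pub_key, key_len);
        if (err)
                goto free_tfm;

        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto free_tfm;
        }

        /* verify() consumes signature then digest from one SG list */
        sg_init_table(src_sg, 2);
        sg_set_buf(&src_sg[0], sig, sig_len);
        sg_set_buf(&src_sg[1], digest, digest_len);
        akcipher_request_set_crypt(req, src_sg, NULL, sig_len, digest_len);
        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        err = crypto_wait_req(crypto_akcipher_verify(req), &wait);

        akcipher_request_free(req);
free_tfm:
        crypto_free_akcipher(tfm);
        return err;
}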
@@ -1213,7 +1223,6 @@ config CRYPTO_BLOWFISH_X86_64
 
 config CRYPTO_CAMELLIA
         tristate "Camellia cipher algorithms"
-        depends on CRYPTO
         select CRYPTO_ALGAPI
         help
           Camellia cipher algorithms module.
@@ -1229,7 +1238,6 @@ config CRYPTO_CAMELLIA
 config CRYPTO_CAMELLIA_X86_64
         tristate "Camellia cipher algorithm (x86_64)"
         depends on X86 && 64BIT
-        depends on CRYPTO
         select CRYPTO_SKCIPHER
         imply CRYPTO_CTR
         help
@@ -1246,7 +1254,6 @@ config CRYPTO_CAMELLIA_X86_64
 config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
         tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)"
         depends on X86 && 64BIT
-        depends on CRYPTO
         select CRYPTO_SKCIPHER
         select CRYPTO_CAMELLIA_X86_64
         select CRYPTO_SIMD
@@ -1265,7 +1272,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
 config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
         tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)"
         depends on X86 && 64BIT
-        depends on CRYPTO
         select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
         help
           Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
@@ -1281,7 +1287,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
 config CRYPTO_CAMELLIA_SPARC64
         tristate "Camellia cipher algorithm (SPARC64)"
         depends on SPARC64
-        depends on CRYPTO
         select CRYPTO_ALGAPI
         select CRYPTO_SKCIPHER
         help

@@ -50,6 +50,12 @@ sm2_generic-y += sm2.o
 
 obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o
 
+$(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h
+$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h
+ecdsa_generic-y += ecdsa.o
+ecdsa_generic-y += ecdsasignature.asn1.o
+obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o
+
 crypto_acompress-y := acompress.o
 crypto_acompress-y += scompress.o
 obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o

@@ -21,9 +21,28 @@ union aegis_block {
         u8 bytes[AEGIS_BLOCK_SIZE];
 };
 
+struct aegis_state;
+
+extern int aegis128_have_aes_insn;
+
 #define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block))
 #define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN)
 
+bool crypto_aegis128_have_simd(void);
+void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
+void crypto_aegis128_init_simd(struct aegis_state *state,
+                               const union aegis_block *key,
+                               const u8 *iv);
+void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
+                                        const u8 *src, unsigned int size);
+void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
+                                        const u8 *src, unsigned int size);
+int crypto_aegis128_final_simd(struct aegis_state *state,
+                               union aegis_block *tag_xor,
+                               unsigned int assoclen,
+                               unsigned int cryptlen,
+                               unsigned int authsize);
+
 static __always_inline void crypto_aegis_block_xor(union aegis_block *dst,
                                                    const union aegis_block *src)
 {

@@ -58,21 +58,6 @@ static bool aegis128_do_simd(void)
         return false;
 }
 
-bool crypto_aegis128_have_simd(void);
-void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg);
-void crypto_aegis128_init_simd(struct aegis_state *state,
-                               const union aegis_block *key,
-                               const u8 *iv);
-void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
-                                        const u8 *src, unsigned int size);
-void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
-                                        const u8 *src, unsigned int size);
-int crypto_aegis128_final_simd(struct aegis_state *state,
-                               union aegis_block *tag_xor,
-                               unsigned int assoclen,
-                               unsigned int cryptlen,
-                               unsigned int authsize);
-
 static void crypto_aegis128_update(struct aegis_state *state)
 {
         union aegis_block tmp;

@@ -30,7 +30,7 @@ bool crypto_aegis128_have_simd(void)
         return IS_ENABLED(CONFIG_ARM64);
 }
 
-void crypto_aegis128_init_simd(union aegis_block *state,
+void crypto_aegis128_init_simd(struct aegis_state *state,
                                const union aegis_block *key,
                                const u8 *iv)
 {
@@ -39,14 +39,14 @@ void crypto_aegis128_init_simd(struct aegis_state *state,
         kernel_neon_end();
 }
 
-void crypto_aegis128_update_simd(union aegis_block *state, const void *msg)
+void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg)
 {
         kernel_neon_begin();
         crypto_aegis128_update_neon(state, msg);
         kernel_neon_end();
 }
 
-void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst,
+void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
                                         const u8 *src, unsigned int size)
 {
         kernel_neon_begin();
@@ -54,7 +54,7 @@ void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst,
         kernel_neon_end();
 }
 
-void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst,
+void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
                                         const u8 *src, unsigned int size)
 {
         kernel_neon_begin();
@@ -62,7 +62,7 @@ void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst,
         kernel_neon_end();
 }
 
-int crypto_aegis128_final_simd(union aegis_block *state,
+int crypto_aegis128_final_simd(struct aegis_state *state,
                                union aegis_block *tag_xor,
                                unsigned int assoclen,
                                unsigned int cryptlen,
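Taken together with the two preceding hunks, this moves the SIMD entry-point declarations out of aegis128-core.c into the shared aegis.h (with a forward declaration of struct aegis_state), so the NEON glue above is now compiled against the same prototypes it implements. Previously its definitions took union aegis_block * where the core declared struct aegis_state *, a mismatch the shared header now catches at compile time.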

@@ -491,8 +491,8 @@ static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 /**
  * af_alg_alloc_tsgl - allocate the TX SGL
  *
- * @sk socket of connection to user space
- * @return: 0 upon success, < 0 upon error
+ * @sk: socket of connection to user space
+ * Return: 0 upon success, < 0 upon error
  */
 static int af_alg_alloc_tsgl(struct sock *sk)
 {
@@ -525,15 +525,15 @@ static int af_alg_alloc_tsgl(struct sock *sk)
 }
 
 /**
- * aead_count_tsgl - Count number of TX SG entries
+ * af_alg_count_tsgl - Count number of TX SG entries
  *
  * The counting starts from the beginning of the SGL to @bytes. If
- * an offset is provided, the counting of the SG entries starts at the offset.
+ * an @offset is provided, the counting of the SG entries starts at the @offset.
  *
- * @sk socket of connection to user space
- * @bytes Count the number of SG entries holding given number of bytes.
- * @offset Start the counting of SG entries from the given offset.
- * @return Number of TX SG entries found given the constraints
+ * @sk: socket of connection to user space
+ * @bytes: Count the number of SG entries holding given number of bytes.
+ * @offset: Start the counting of SG entries from the given offset.
+ * Return: Number of TX SG entries found given the constraints
  */
 unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
 {
@@ -577,19 +577,19 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
 EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
 
 /**
- * aead_pull_tsgl - Release the specified buffers from TX SGL
+ * af_alg_pull_tsgl - Release the specified buffers from TX SGL
  *
- * If @dst is non-null, reassign the pages to dst. The caller must release
+ * If @dst is non-null, reassign the pages to @dst. The caller must release
  * the pages. If @dst_offset is given only reassign the pages to @dst starting
  * at the @dst_offset (byte). The caller must ensure that @dst is large
  * enough (e.g. by using af_alg_count_tsgl with the same offset).
  *
- * @sk socket of connection to user space
- * @used Number of bytes to pull from TX SGL
- * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
- *      caller must release the buffers in dst.
- * @dst_offset Reassign the TX SGL from given offset. All buffers before
- *             reaching the offset is released.
+ * @sk: socket of connection to user space
+ * @used: Number of bytes to pull from TX SGL
+ * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
+ *       caller must release the buffers in dst.
+ * @dst_offset: Reassign the TX SGL from given offset. All buffers before
+ *              reaching the offset is released.
  */
 void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
                       size_t dst_offset)
@@ -657,7 +657,7 @@ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);
 /**
  * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
  *
- * @areq Request holding the TX and RX SGL
+ * @areq: Request holding the TX and RX SGL
  */
 static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 {
@@ -692,9 +692,9 @@ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
 /**
  * af_alg_wait_for_wmem - wait for availability of writable memory
  *
- * @sk socket of connection to user space
- * @flags If MSG_DONTWAIT is set, then only report if function would sleep
- * @return 0 when writable memory is available, < 0 upon error
+ * @sk: socket of connection to user space
+ * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
+ * Return: 0 when writable memory is available, < 0 upon error
  */
 static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
 {
@@ -725,7 +725,7 @@ static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
 /**
  * af_alg_wmem_wakeup - wakeup caller when writable memory is available
  *
- * @sk socket of connection to user space
+ * @sk: socket of connection to user space
  */
 void af_alg_wmem_wakeup(struct sock *sk)
 {
@@ -748,10 +748,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
 /**
  * af_alg_wait_for_data - wait for availability of TX data
  *
- * @sk socket of connection to user space
- * @flags If MSG_DONTWAIT is set, then only report if function would sleep
- * @min Set to minimum request size if partial requests are allowed.
- * @return 0 when writable memory is available, < 0 upon error
+ * @sk: socket of connection to user space
+ * @flags: If MSG_DONTWAIT is set, then only report if function would sleep
+ * @min: Set to minimum request size if partial requests are allowed.
+ * Return: 0 when writable memory is available, < 0 upon error
  */
 int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
 {
@@ -790,7 +790,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data);
 /**
  * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
  *
- * @sk socket of connection to user space
+ * @sk: socket of connection to user space
  */
 static void af_alg_data_wakeup(struct sock *sk)
 {
@@ -820,12 +820,12 @@ static void af_alg_data_wakeup(struct sock *sk)
  *
  * In addition, the ctx is filled with the information sent via CMSG.
  *
- * @sock socket of connection to user space
- * @msg message from user space
- * @size size of message from user space
- * @ivsize the size of the IV for the cipher operation to verify that the
- *         user-space-provided IV has the right size
- * @return the number of copied data upon success, < 0 upon error
+ * @sock: socket of connection to user space
+ * @msg: message from user space
+ * @size: size of message from user space
+ * @ivsize: the size of the IV for the cipher operation to verify that the
+ *          user-space-provided IV has the right size
+ * Return: the number of copied data upon success, < 0 upon error
  */
 int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
                    unsigned int ivsize)
@@ -977,6 +977,11 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg);
 
 /**
  * af_alg_sendpage - sendpage system call handler
+ * @sock: socket of connection to user space to write to
+ * @page: data to send
+ * @offset: offset into page to begin sending
+ * @size: length of data
+ * @flags: message send/receive flags
  *
  * This is a generic implementation of sendpage to fill ctx->tsgl_list.
  */
@@ -1035,6 +1040,7 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage);
 
 /**
  * af_alg_free_resources - release resources required for crypto request
+ * @areq: Request holding the TX and RX SGL
  */
 void af_alg_free_resources(struct af_alg_async_req *areq)
 {
@@ -1047,6 +1053,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources);
 
 /**
  * af_alg_async_cb - AIO callback handler
+ * @_req: async request info
+ * @err: if non-zero, error result to be returned via ki_complete();
+ *       otherwise return the AIO output length via ki_complete().
  *
  * This handler cleans up the struct af_alg_async_req upon completion of the
  * AIO operation.
@@ -1073,6 +1082,9 @@ EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
 /**
  * af_alg_poll - poll system call handler
+ * @file: file pointer
+ * @sock: socket to poll
+ * @wait: poll_table
  */
 __poll_t af_alg_poll(struct file *file, struct socket *sock,
                      poll_table *wait)
@@ -1098,9 +1110,9 @@ EXPORT_SYMBOL_GPL(af_alg_poll);
 /**
  * af_alg_alloc_areq - allocate struct af_alg_async_req
  *
- * @sk socket of connection to user space
- * @areqlen size of struct af_alg_async_req + crypto_*_reqsize
- * @return allocated data structure or ERR_PTR upon error
+ * @sk: socket of connection to user space
+ * @areqlen: size of struct af_alg_async_req + crypto_*_reqsize
+ * Return: allocated data structure or ERR_PTR upon error
  */
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
                                            unsigned int areqlen)
@@ -1125,13 +1137,13 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_areq);
  * af_alg_get_rsgl - create the RX SGL for the output data from the crypto
  *                   operation
  *
- * @sk socket of connection to user space
- * @msg user space message
- * @flags flags used to invoke recvmsg with
- * @areq instance of the cryptographic request that will hold the RX SGL
- * @maxsize maximum number of bytes to be pulled from user space
- * @outlen number of bytes in the RX SGL
- * @return 0 on success, < 0 upon error
+ * @sk: socket of connection to user space
+ * @msg: user space message
+ * @flags: flags used to invoke recvmsg with
+ * @areq: instance of the cryptographic request that will hold the RX SGL
+ * @maxsize: maximum number of bytes to be pulled from user space
+ * @outlen: number of bytes in the RX SGL
+ * Return: 0 on success, < 0 upon error
  */
 int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
                     struct af_alg_async_req *areq, size_t maxsize,

@@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
 {
         struct crypto_alg *alg;
 
-        if (unlikely(!mem))
+        if (IS_ERR_OR_NULL(mem))
                 return;
 
         alg = tfm->__crt_alg;
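This is the "crypto_destroy_tfm now ignores errors as well as NULL pointers" item from the summary above. A minimal sketch (not part of this diff) of the error-path pattern it permits: the typed free helpers such as crypto_free_shash() funnel into crypto_destroy_tfm(), so a failed allocation can be handed straight back to cleanup without a separate IS_ERR() guard.

#include <crypto/hash.h>
#include <linux/err.h>

static int destroy_tfm_demo(void)
{
        struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
        int err = PTR_ERR_OR_ZERO(tfm);

        if (!err) {
                /* ... use the transform ... */
        }

        crypto_free_shash(tfm); /* now safe for NULL and ERR_PTR() alike */
        return err;
}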

@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>
+#include <linux/asn1.h>
 #include <keys/asymmetric-subtype.h>
 #include <crypto/public_key.h>
 #include <crypto/akcipher.h>
@@ -85,7 +86,8 @@ int software_key_determine_akcipher(const char *encoding,
                 return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
         }
 
-        if (strcmp(encoding, "raw") == 0) {
+        if (strcmp(encoding, "raw") == 0 ||
+            strcmp(encoding, "x962") == 0) {
                 strcpy(alg_name, pkey->pkey_algo);
                 return 0;
         }

@@ -227,6 +227,26 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
                 ctx->cert->sig->hash_algo = "sha224";
                 goto rsa_pkcs1;
 
+        case OID_id_ecdsa_with_sha1:
+                ctx->cert->sig->hash_algo = "sha1";
+                goto ecdsa;
+
+        case OID_id_ecdsa_with_sha224:
+                ctx->cert->sig->hash_algo = "sha224";
+                goto ecdsa;
+
+        case OID_id_ecdsa_with_sha256:
+                ctx->cert->sig->hash_algo = "sha256";
+                goto ecdsa;
+
+        case OID_id_ecdsa_with_sha384:
+                ctx->cert->sig->hash_algo = "sha384";
+                goto ecdsa;
+
+        case OID_id_ecdsa_with_sha512:
+                ctx->cert->sig->hash_algo = "sha512";
+                goto ecdsa;
+
         case OID_gost2012Signature256:
                 ctx->cert->sig->hash_algo = "streebog256";
                 goto ecrdsa;
@@ -255,6 +275,11 @@ sm2:
         ctx->cert->sig->encoding = "raw";
         ctx->algo_oid = ctx->last_oid;
         return 0;
+ecdsa:
+        ctx->cert->sig->pkey_algo = "ecdsa";
+        ctx->cert->sig->encoding = "x962";
+        ctx->algo_oid = ctx->last_oid;
+        return 0;
 }
 
 /*
@@ -276,7 +301,8 @@ int x509_note_signature(void *context, size_t hdrlen,
 
         if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
             strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 ||
-            strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0) {
+            strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 ||
+            strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) {
                 /* Discard the BIT STRING metadata */
                 if (vlen < 1 || *(const u8 *)value != 0)
                         return -EBADMSG;
@@ -459,6 +485,7 @@ int x509_extract_key_data(void *context, size_t hdrlen,
                           const void *value, size_t vlen)
 {
         struct x509_parse_context *ctx = context;
+        enum OID oid;
 
         ctx->key_algo = ctx->last_oid;
         switch (ctx->last_oid) {
@@ -470,7 +497,25 @@ int x509_extract_key_data(void *context, size_t hdrlen,
                 ctx->cert->pub->pkey_algo = "ecrdsa";
                 break;
         case OID_id_ecPublicKey:
-                ctx->cert->pub->pkey_algo = "sm2";
+                if (parse_OID(ctx->params, ctx->params_size, &oid) != 0)
+                        return -EBADMSG;
+                switch (oid) {
+                case OID_sm2:
+                        ctx->cert->pub->pkey_algo = "sm2";
+                        break;
+                case OID_id_prime192v1:
+                        ctx->cert->pub->pkey_algo = "ecdsa-nist-p192";
+                        break;
+                case OID_id_prime256v1:
+                        ctx->cert->pub->pkey_algo = "ecdsa-nist-p256";
+                        break;
+                case OID_id_ansip384r1:
+                        ctx->cert->pub->pkey_algo = "ecdsa-nist-p384";
+                        break;
+                default:
+                        return -ENOPKG;
+                }
                 break;
         default:
                 return -ENOPKG;

@@ -129,7 +129,9 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
         }
 
         ret = -EKEYREJECTED;
-        if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
+        if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0 &&
+            (strncmp(cert->pub->pkey_algo, "ecdsa-", 6) != 0 ||
+             strcmp(cert->sig->pkey_algo, "ecdsa") != 0))
                 goto out;
 
         ret = public_key_verify_signature(cert->pub, cert->sig);
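This pairs with the parser change above: a certificate's public key algorithm is now stored with the curve suffix (e.g. "ecdsa-nist-p256") while the signature's pkey_algo stays the bare "ecdsa", so the self-signed check must accept any "ecdsa-"-prefixed public key paired with an "ecdsa" signature rather than requiring an exact string match.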

@@ -1,26 +1,4 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright 2012 Xyratex Technology Limited
  */

@@ -24,6 +24,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <crypto/ecc_curve.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/slab.h>
@@ -42,7 +43,14 @@ typedef struct {
         u64 m_high;
 } uint128_t;
 
-static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
+/* Returns curve25519 curve param */
+const struct ecc_curve *ecc_get_curve25519(void)
+{
+        return &ecc_25519;
+}
+EXPORT_SYMBOL(ecc_get_curve25519);
+
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
 {
         switch (curve_id) {
         /* In FIPS mode only allow P256 and higher */
@@ -50,10 +58,13 @@ const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
                 return fips_enabled ? NULL : &nist_p192;
         case ECC_CURVE_NIST_P256:
                 return &nist_p256;
+        case ECC_CURVE_NIST_P384:
+                return &nist_p384;
         default:
                 return NULL;
         }
 }
+EXPORT_SYMBOL(ecc_get_curve);
 
 static u64 *ecc_alloc_digits_space(unsigned int ndigits)
 {
@@ -128,7 +139,7 @@ bool vli_is_zero(const u64 *vli, unsigned int ndigits)
 }
 EXPORT_SYMBOL(vli_is_zero);
 
-/* Returns nonzero if bit bit of vli is set. */
+/* Returns nonzero if bit of vli is set. */
 static u64 vli_test_bit(const u64 *vli, unsigned int bit)
 {
         return (vli[bit / 64] & ((u64)1 << (bit % 64)));
@@ -775,18 +786,133 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product,
         }
 }
 
+#define SL32OR32(x32, y32) (((u64)x32 << 32) | y32)
+#define AND64H(x64)  (x64 & 0xffFFffFF00000000ull)
+#define AND64L(x64)  (x64 & 0x00000000ffFFffFFull)
+
+/* Computes result = product % curve_prime
+ * from "Mathematical routines for the NIST prime elliptic curves"
+ */
+static void vli_mmod_fast_384(u64 *result, const u64 *product,
+                              const u64 *curve_prime, u64 *tmp)
+{
+        int carry;
+        const unsigned int ndigits = 6;
+
+        /* t */
+        vli_set(result, product, ndigits);
+
+        /* s1 */
+        tmp[0] = 0;             // 0 || 0
+        tmp[1] = 0;             // 0 || 0
+        tmp[2] = SL32OR32(product[11], (product[10]>>32));      //a22||a21
+        tmp[3] = product[11]>>32;       // 0 ||a23
+        tmp[4] = 0;             // 0 || 0
+        tmp[5] = 0;             // 0 || 0
+        carry = vli_lshift(tmp, tmp, 1, ndigits);
+        carry += vli_add(result, result, tmp, ndigits);
+
+        /* s2 */
+        tmp[0] = product[6];    //a13||a12
+        tmp[1] = product[7];    //a15||a14
+        tmp[2] = product[8];    //a17||a16
+        tmp[3] = product[9];    //a19||a18
+        tmp[4] = product[10];   //a21||a20
+        tmp[5] = product[11];   //a23||a22
+        carry += vli_add(result, result, tmp, ndigits);
+
+        /* s3 */
+        tmp[0] = SL32OR32(product[11], (product[10]>>32));      //a22||a21
+        tmp[1] = SL32OR32(product[6], (product[11]>>32));       //a12||a23
+        tmp[2] = SL32OR32(product[7], (product[6])>>32);        //a14||a13
+        tmp[3] = SL32OR32(product[8], (product[7]>>32));        //a16||a15
+        tmp[4] = SL32OR32(product[9], (product[8]>>32));        //a18||a17
+        tmp[5] = SL32OR32(product[10], (product[9]>>32));       //a20||a19
+        carry += vli_add(result, result, tmp, ndigits);
+
+        /* s4 */
+        tmp[0] = AND64H(product[11]);   //a23|| 0
+        tmp[1] = (product[10]<<32);     //a20|| 0
+        tmp[2] = product[6];    //a13||a12
+        tmp[3] = product[7];    //a15||a14
+        tmp[4] = product[8];    //a17||a16
+        tmp[5] = product[9];    //a19||a18
+        carry += vli_add(result, result, tmp, ndigits);
+
+        /* s5 */
+        tmp[0] = 0;             // 0|| 0
+        tmp[1] = 0;             // 0|| 0
+        tmp[2] = product[10];   //a21||a20
+        tmp[3] = product[11];   //a23||a22
+        tmp[4] = 0;             // 0|| 0
+        tmp[5] = 0;             // 0|| 0
+        carry += vli_add(result, result, tmp, ndigits);
+
+        /* s6 */
+        tmp[0] = AND64L(product[10]);   // 0 ||a20
+        tmp[1] = AND64H(product[10]);   //a21|| 0
+        tmp[2] = product[11];   //a23||a22
+        tmp[3] = 0;             // 0 || 0
+        tmp[4] = 0;             // 0 || 0
+        tmp[5] = 0;             // 0 || 0
+        carry += vli_add(result, result, tmp, ndigits);
+
+        /* d1 */
+        tmp[0] = SL32OR32(product[6], (product[11]>>32));       //a12||a23
+        tmp[1] = SL32OR32(product[7], (product[6]>>32));        //a14||a13
+        tmp[2] = SL32OR32(product[8], (product[7]>>32));        //a16||a15
+        tmp[3] = SL32OR32(product[9], (product[8]>>32));        //a18||a17
+        tmp[4] = SL32OR32(product[10], (product[9]>>32));       //a20||a19
+        tmp[5] = SL32OR32(product[11], (product[10]>>32));      //a22||a21
+        carry -= vli_sub(result, result, tmp, ndigits);
+
+        /* d2 */
+        tmp[0] = (product[10]<<32);     //a20|| 0
+        tmp[1] = SL32OR32(product[11], (product[10]>>32));      //a22||a21
+        tmp[2] = (product[11]>>32);     // 0 ||a23
+        tmp[3] = 0;             // 0 || 0
+        tmp[4] = 0;             // 0 || 0
+        tmp[5] = 0;             // 0 || 0
+        carry -= vli_sub(result, result, tmp, ndigits);
+
+        /* d3 */
+        tmp[0] = 0;             // 0 || 0
+        tmp[1] = AND64H(product[11]);   //a23|| 0
+        tmp[2] = product[11]>>32;       // 0 ||a23
+        tmp[3] = 0;             // 0 || 0
+        tmp[4] = 0;             // 0 || 0
+        tmp[5] = 0;             // 0 || 0
+        carry -= vli_sub(result, result, tmp, ndigits);
+
+        if (carry < 0) {
+                do {
+                        carry += vli_add(result, result, curve_prime, ndigits);
+                } while (carry < 0);
+        } else {
+                while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
+                        carry -= vli_sub(result, result, curve_prime, ndigits);
+        }
+}
+
+#undef SL32OR32
+#undef AND64H
+#undef AND64L
+
 /* Computes result = product % curve_prime for different curve_primes.
  *
  * Note that curve_primes are distinguished just by heuristic check and
  * not by complete conformance check.
  */
 static bool vli_mmod_fast(u64 *result, u64 *product,
-                          const u64 *curve_prime, unsigned int ndigits)
+                          const struct ecc_curve *curve)
 {
         u64 tmp[2 * ECC_MAX_DIGITS];
+        const u64 *curve_prime = curve->p;
+        const unsigned int ndigits = curve->g.ndigits;
 
-        /* Currently, both NIST primes have -1 in lowest qword. */
-        if (curve_prime[0] != -1ull) {
+        /* All NIST curves have name prefix 'nist_' */
+        if (strncmp(curve->name, "nist_", 5) != 0) {
                 /* Try to handle Pseudo-Mersenne primes. */
                 if (curve_prime[ndigits - 1] == -1ull) {
                         vli_mmod_special(result, product, curve_prime,
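For reference, the s1-s6/d1-d3 terms in vli_mmod_fast_384() above are the standard fast reduction for the NIST P-384 prime: since p384 = 2^384 - 2^128 - 2^96 + 2^32 - 1, every power 2^(384+k) in the 768-bit product folds back via 2^384 ≡ 2^128 + 2^96 - 2^32 + 1 (mod p384). Grouping the folded 32-bit words a12..a23 yields six addend terms (s1 with coefficient 2, hence the single left shift) and three subtrahend terms, with the accumulated carry normalized against the prime at the end.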
@@ -809,6 +935,9 @@ static bool vli_mmod_fast(u64 *result, u64 *product,
         case 4:
                 vli_mmod_fast_256(result, product, curve_prime, tmp);
                 break;
+        case 6:
+                vli_mmod_fast_384(result, product, curve_prime, tmp);
+                break;
         default:
                 pr_err_ratelimited("ecc: unsupported digits size!\n");
                 return false;
@@ -832,22 +961,22 @@ EXPORT_SYMBOL(vli_mod_mult_slow);
 
 /* Computes result = (left * right) % curve_prime. */
 static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
-                              const u64 *curve_prime, unsigned int ndigits)
+                              const struct ecc_curve *curve)
 {
         u64 product[2 * ECC_MAX_DIGITS];
 
-        vli_mult(product, left, right, ndigits);
-        vli_mmod_fast(result, product, curve_prime, ndigits);
+        vli_mult(product, left, right, curve->g.ndigits);
+        vli_mmod_fast(result, product, curve);
 }
 
 /* Computes result = left^2 % curve_prime. */
 static void vli_mod_square_fast(u64 *result, const u64 *left,
-                                const u64 *curve_prime, unsigned int ndigits)
+                                const struct ecc_curve *curve)
 {
         u64 product[2 * ECC_MAX_DIGITS];
 
-        vli_square(product, left, ndigits);
-        vli_mmod_fast(result, product, curve_prime, ndigits);
+        vli_square(product, left, curve->g.ndigits);
+        vli_mmod_fast(result, product, curve);
 }
 
 #define EVEN(vli) (!(vli[0] & 1))
@@ -945,25 +1074,27 @@ static bool ecc_point_is_zero(const struct ecc_point *point)
 
 /* Double in place */
 static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
-                                      u64 *curve_prime, unsigned int ndigits)
+                                      const struct ecc_curve *curve)
 {
         /* t1 = x, t2 = y, t3 = z */
         u64 t4[ECC_MAX_DIGITS];
         u64 t5[ECC_MAX_DIGITS];
+        const u64 *curve_prime = curve->p;
+        const unsigned int ndigits = curve->g.ndigits;
 
         if (vli_is_zero(z1, ndigits))
                 return;
 
         /* t4 = y1^2 */
-        vli_mod_square_fast(t4, y1, curve_prime, ndigits);
+        vli_mod_square_fast(t4, y1, curve);
         /* t5 = x1*y1^2 = A */
-        vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits);
+        vli_mod_mult_fast(t5, x1, t4, curve);
         /* t4 = y1^4 */
-        vli_mod_square_fast(t4, t4, curve_prime, ndigits);
+        vli_mod_square_fast(t4, t4, curve);
         /* t2 = y1*z1 = z3 */
-        vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits);
+        vli_mod_mult_fast(y1, y1, z1, curve);
         /* t3 = z1^2 */
-        vli_mod_square_fast(z1, z1, curve_prime, ndigits);
+        vli_mod_square_fast(z1, z1, curve);
 
         /* t1 = x1 + z1^2 */
         vli_mod_add(x1, x1, z1, curve_prime, ndigits);
@@ -972,7 +1103,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
         /* t3 = x1 - z1^2 */
         vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
         /* t1 = x1^2 - z1^4 */
-        vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits);
+        vli_mod_mult_fast(x1, x1, z1, curve);
 
         /* t3 = 2*(x1^2 - z1^4) */
         vli_mod_add(z1, x1, x1, curve_prime, ndigits);
@@ -989,7 +1120,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
         /* t1 = 3/2*(x1^2 - z1^4) = B */
 
         /* t3 = B^2 */
-        vli_mod_square_fast(z1, x1, curve_prime, ndigits);
+        vli_mod_square_fast(z1, x1, curve);
         /* t3 = B^2 - A */
         vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
         /* t3 = B^2 - 2A = x3 */
@@ -997,7 +1128,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
         /* t5 = A - x3 */
         vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
         /* t1 = B * (A - x3) */
-        vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+        vli_mod_mult_fast(x1, x1, t5, curve);
         /* t4 = B * (A - x3) - y1^4 = y3 */
         vli_mod_sub(t4, x1, t4, curve_prime, ndigits);
@@ -1007,23 +1138,22 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
 }
 
 /* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
-static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
-                    unsigned int ndigits)
+static void apply_z(u64 *x1, u64 *y1, u64 *z, const struct ecc_curve *curve)
 {
         u64 t1[ECC_MAX_DIGITS];
 
-        vli_mod_square_fast(t1, z, curve_prime, ndigits);    /* z^2 */
-        vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
-        vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits);  /* z^3 */
-        vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
+        vli_mod_square_fast(t1, z, curve);              /* z^2 */
+        vli_mod_mult_fast(x1, x1, t1, curve);           /* x1 * z^2 */
+        vli_mod_mult_fast(t1, t1, z, curve);            /* z^3 */
+        vli_mod_mult_fast(y1, y1, t1, curve);           /* y1 * z^3 */
 }
 
 /* P = (x1, y1) => 2P, (x2, y2) => P' */
 static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
-                                u64 *p_initial_z, u64 *curve_prime,
-                                unsigned int ndigits)
+                                u64 *p_initial_z, const struct ecc_curve *curve)
 {
         u64 z[ECC_MAX_DIGITS];
+        const unsigned int ndigits = curve->g.ndigits;
 
         vli_set(x2, x1, ndigits);
         vli_set(y2, y1, ndigits);
@@ -1034,35 +1164,37 @@ static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
         if (p_initial_z)
                 vli_set(z, p_initial_z, ndigits);
 
-        apply_z(x1, y1, z, curve_prime, ndigits);
+        apply_z(x1, y1, z, curve);
 
-        ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);
+        ecc_point_double_jacobian(x1, y1, z, curve);
 
-        apply_z(x2, y2, z, curve_prime, ndigits);
+        apply_z(x2, y2, z, curve);
 }
 
 /* Input P = (x1, y1, Z), Q = (x2, y2, Z)
  * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
  * or P => P', Q => P + Q
  */
-static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
-                     unsigned int ndigits)
+static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
                      const struct ecc_curve *curve)
 {
         /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
         u64 t5[ECC_MAX_DIGITS];
+        const u64 *curve_prime = curve->p;
+        const unsigned int ndigits = curve->g.ndigits;
 
         /* t5 = x2 - x1 */
         vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
         /* t5 = (x2 - x1)^2 = A */
-        vli_mod_square_fast(t5, t5, curve_prime, ndigits);
+        vli_mod_square_fast(t5, t5, curve);
         /* t1 = x1*A = B */
-        vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+        vli_mod_mult_fast(x1, x1, t5, curve);
         /* t3 = x2*A = C */
-        vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
+        vli_mod_mult_fast(x2, x2, t5, curve);
         /* t4 = y2 - y1 */
         vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
         /* t5 = (y2 - y1)^2 = D */
-        vli_mod_square_fast(t5, y2, curve_prime, ndigits);
+        vli_mod_square_fast(t5, y2, curve);
 
         /* t5 = D - B */
         vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
@@ -1071,11 +1203,11 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
         /* t3 = C - B */
         vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
         /* t2 = y1*(C - B) */
-        vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits);
+        vli_mod_mult_fast(y1, y1, x2, curve);
         /* t3 = B - x3 */
         vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
         /* t4 = (y2 - y1)*(B - x3) */
-        vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits);
+        vli_mod_mult_fast(y2, y2, x2, curve);
         /* t4 = y3 */
         vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
@@ -1086,22 +1218,24 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
  * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
  * or P => P - Q, Q => P + Q
  */
-static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
-                       unsigned int ndigits)
+static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
+                       const struct ecc_curve *curve)
 {
         /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
         u64 t5[ECC_MAX_DIGITS];
         u64 t6[ECC_MAX_DIGITS];
         u64 t7[ECC_MAX_DIGITS];
+        const u64 *curve_prime = curve->p;
+        const unsigned int ndigits = curve->g.ndigits;
 
         /* t5 = x2 - x1 */
         vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
         /* t5 = (x2 - x1)^2 = A */
-        vli_mod_square_fast(t5, t5, curve_prime, ndigits);
+        vli_mod_square_fast(t5, t5, curve);
         /* t1 = x1*A = B */
-        vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
+        vli_mod_mult_fast(x1, x1, t5, curve);
         /* t3 = x2*A = C */
-        vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
+        vli_mod_mult_fast(x2, x2, t5, curve);
         /* t4 = y2 + y1 */
         vli_mod_add(t5, y2, y1, curve_prime, ndigits);
         /* t4 = y2 - y1 */
@@ -1110,29 +1244,29 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
         /* t6 = C - B */
         vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
         /* t2 = y1 * (C - B) */
-        vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits);
+        vli_mod_mult_fast(y1, y1, t6, curve);
         /* t6 = B + C */
         vli_mod_add(t6, x1, x2, curve_prime, ndigits);
         /* t3 = (y2 - y1)^2 */
-        vli_mod_square_fast(x2, y2, curve_prime, ndigits);
+        vli_mod_square_fast(x2, y2, curve);
         /* t3 = x3 */
         vli_mod_sub(x2, x2, t6, curve_prime, ndigits);
 
         /* t7 = B - x3 */
         vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
         /* t4 = (y2 - y1)*(B - x3) */
-        vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits);
+        vli_mod_mult_fast(y2, y2, t7, curve);
         /* t4 = y3 */
         vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
 
         /* t7 = (y2 + y1)^2 = F */
-        vli_mod_square_fast(t7, t5, curve_prime, ndigits);
+        vli_mod_square_fast(t7, t5, curve);
         /* t7 = x3' */
         vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
         /* t6 = x3' - B */
         vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
         /* t6 = (y2 + y1)*(x3' - B) */
-        vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits);
+        vli_mod_mult_fast(t6, t6, t5, curve);
         /* t2 = y3' */
         vli_mod_sub(y1, t6, y1, curve_prime, ndigits);
@@ -1162,41 +1296,37 @@ static void ecc_point_mult(struct ecc_point *result,
         vli_set(rx[1], point->x, ndigits);
         vli_set(ry[1], point->y, ndigits);
 
-        xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
-                            ndigits);
+        xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve);
 
         for (i = num_bits - 2; i > 0; i--) {
                 nb = !vli_test_bit(scalar, i);
-                xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
-                           ndigits);
-                xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
-                         ndigits);
+                xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
+                xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
         }
 
         nb = !vli_test_bit(scalar, 0);
-        xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
-                   ndigits);
+        xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
 
         /* Find final 1/Z value. */
         /* X1 - X0 */
         vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
         /* Yb * (X1 - X0) */
-        vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits);
+        vli_mod_mult_fast(z, z, ry[1 - nb], curve);
         /* xP * Yb * (X1 - X0) */
-        vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits);
+        vli_mod_mult_fast(z, z, point->x, curve);
         /* 1 / (xP * Yb * (X1 - X0)) */
         vli_mod_inv(z, z, curve_prime, point->ndigits);
         /* yP / (xP * Yb * (X1 - X0)) */
-        vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits);
+        vli_mod_mult_fast(z, z, point->y, curve);
         /* Xb * yP / (xP * Yb * (X1 - X0)) */
-        vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits);
+        vli_mod_mult_fast(z, z, rx[1 - nb], curve);
         /* End 1/Z calculation */
 
-        xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);
+        xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
 
-        apply_z(rx[0], ry[0], z, curve_prime, ndigits);
+        apply_z(rx[0], ry[0], z, curve);
 
         vli_set(result->x, rx[0], ndigits);
         vli_set(result->y, ry[0], ndigits);
@@ -1217,9 +1347,9 @@ static void ecc_point_add(const struct ecc_point *result,
         vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
         vli_set(px, p->x, ndigits);
         vli_set(py, p->y, ndigits);
-        xycz_add(px, py, result->x, result->y, curve->p, ndigits);
+        xycz_add(px, py, result->x, result->y, curve);
         vli_mod_inv(z, z, curve->p, ndigits);
-        apply_z(result->x, result->y, z, curve->p, ndigits);
+        apply_z(result->x, result->y, z, curve);
 }
 
 /* Computes R = u1P + u2Q mod p using Shamir's trick.
@@ -1248,8 +1378,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
         points[2] = q;
         points[3] = &sum;
 
-        num_bits = max(vli_num_bits(u1, ndigits),
-                       vli_num_bits(u2, ndigits));
+        num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits));
         i = num_bits - 1;
idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1); idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
point = points[idx]; point = points[idx];
@ -1260,7 +1389,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
z[0] = 1; z[0] = 1;
for (--i; i >= 0; i--) { for (--i; i >= 0; i--) {
ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits); ecc_point_double_jacobian(rx, ry, z, curve);
idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1); idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
point = points[idx]; point = points[idx];
if (point) { if (point) {
@ -1270,27 +1399,17 @@ void ecc_point_mult_shamir(const struct ecc_point *result,
vli_set(tx, point->x, ndigits); vli_set(tx, point->x, ndigits);
vli_set(ty, point->y, ndigits); vli_set(ty, point->y, ndigits);
apply_z(tx, ty, z, curve->p, ndigits); apply_z(tx, ty, z, curve);
vli_mod_sub(tz, rx, tx, curve->p, ndigits); vli_mod_sub(tz, rx, tx, curve->p, ndigits);
xycz_add(tx, ty, rx, ry, curve->p, ndigits); xycz_add(tx, ty, rx, ry, curve);
vli_mod_mult_fast(z, z, tz, curve->p, ndigits); vli_mod_mult_fast(z, z, tz, curve);
} }
} }
vli_mod_inv(z, z, curve->p, ndigits); vli_mod_inv(z, z, curve->p, ndigits);
apply_z(rx, ry, z, curve->p, ndigits); apply_z(rx, ry, z, curve);
} }
EXPORT_SYMBOL(ecc_point_mult_shamir); EXPORT_SYMBOL(ecc_point_mult_shamir);
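The change running through this file replaces the (curve_prime, ndigits) argument pair with a single const struct ecc_curve *, so every helper can reach the prime, the order and g.ndigits from one handle. A minimal sketch of the new calling convention, assuming the helper names shown in the diff (the wrapper itself is hypothetical):

	/* Hypothetical wrapper: prime and digit count now travel inside *curve. */
	static void normalize_point(u64 *x, u64 *y, u64 *z,
				    const struct ecc_curve *curve)
	{
		/* before: apply_z(x, y, z, curve->p, curve->g.ndigits); */
		apply_z(x, y, z, curve);
	}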
static inline void ecc_swap_digits(const u64 *in, u64 *out,
unsigned int ndigits)
{
const __be64 *src = (__force __be64 *)in;
int i;
for (i = 0; i < ndigits; i++)
out[i] = be64_to_cpu(src[ndigits - 1 - i]);
}
static int __ecc_is_key_valid(const struct ecc_curve *curve, static int __ecc_is_key_valid(const struct ecc_curve *curve,
const u64 *private_key, unsigned int ndigits) const u64 *private_key, unsigned int ndigits)
{ {
@ -1441,10 +1560,10 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
return -EINVAL; return -EINVAL;
/* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */ /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */ vli_mod_square_fast(yy, pk->y, curve); /* y^2 */
vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */ vli_mod_square_fast(xxx, pk->x, curve); /* x^2 */
vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */ vli_mod_mult_fast(xxx, xxx, pk->x, curve); /* x^3 */
vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */ vli_mod_mult_fast(w, curve->a, pk->x, curve); /* a·x */
vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */ vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */ vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */ if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */


@ -26,49 +26,34 @@
#ifndef _CRYPTO_ECC_H #ifndef _CRYPTO_ECC_H
#define _CRYPTO_ECC_H #define _CRYPTO_ECC_H
#include <crypto/ecc_curve.h>
/* One digit is u64 qword. */ /* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3 #define ECC_CURVE_NIST_P192_DIGITS 3
#define ECC_CURVE_NIST_P256_DIGITS 4 #define ECC_CURVE_NIST_P256_DIGITS 4
#define ECC_MAX_DIGITS (512 / 64) #define ECC_CURVE_NIST_P384_DIGITS 6
#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */
#define ECC_DIGITS_TO_BYTES_SHIFT 3 #define ECC_DIGITS_TO_BYTES_SHIFT 3
/** #define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT)
* struct ecc_point - elliptic curve point in affine coordinates
*
* @x: X coordinate in vli form.
* @y: Y coordinate in vli form.
* @ndigits: Length of vlis in u64 qwords.
*/
struct ecc_point {
u64 *x;
u64 *y;
u8 ndigits;
};
#define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits } #define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits }
/** /**
* struct ecc_curve - definition of elliptic curve * ecc_swap_digits() - Copy ndigits from big endian array to native array
* * @in: Input array
* @name: Short name of the curve. * @out: Output array
* @g: Generator point of the curve. * @ndigits: Number of digits to copy
* @p: Prime number, if Barrett's reduction is used for this curve
* pre-calculated value 'mu' is appended to the @p after ndigits.
* Use of Barrett's reduction is heuristically determined in
* vli_mmod_fast().
* @n: Order of the curve group.
* @a: Curve parameter a.
* @b: Curve parameter b.
*/ */
struct ecc_curve { static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
char *name; {
struct ecc_point g; const __be64 *src = (__force __be64 *)in;
u64 *p; int i;
u64 *n;
u64 *a; for (i = 0; i < ndigits; i++)
u64 *b; out[i] = be64_to_cpu(src[ndigits - 1 - i]);
}; }
/** /**
* ecc_is_key_valid() - Validate a given ECDH private key * ecc_is_key_valid() - Validate a given ECDH private key


@ -54,4 +54,53 @@ static struct ecc_curve nist_p256 = {
.b = nist_p256_b .b = nist_p256_b
}; };
/* NIST P-384 */
static u64 nist_p384_g_x[] = { 0x3A545E3872760AB7ull, 0x5502F25DBF55296Cull,
0x59F741E082542A38ull, 0x6E1D3B628BA79B98ull,
0x8Eb1C71EF320AD74ull, 0xAA87CA22BE8B0537ull };
static u64 nist_p384_g_y[] = { 0x7A431D7C90EA0E5Full, 0x0A60B1CE1D7E819Dull,
0xE9DA3113B5F0B8C0ull, 0xF8F41DBD289A147Cull,
0x5D9E98BF9292DC29ull, 0x3617DE4A96262C6Full };
static u64 nist_p384_p[] = { 0x00000000FFFFFFFFull, 0xFFFFFFFF00000000ull,
0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull,
0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
static u64 nist_p384_n[] = { 0xECEC196ACCC52973ull, 0x581A0DB248B0A77Aull,
0xC7634D81F4372DDFull, 0xFFFFFFFFFFFFFFFFull,
0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
static u64 nist_p384_a[] = { 0x00000000FFFFFFFCull, 0xFFFFFFFF00000000ull,
0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull,
0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull };
static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull,
0x0314088f5013875aull, 0x181d9c6efe814112ull,
0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull };
static struct ecc_curve nist_p384 = {
.name = "nist_384",
.g = {
.x = nist_p384_g_x,
.y = nist_p384_g_y,
.ndigits = 6,
},
.p = nist_p384_p,
.n = nist_p384_n,
.a = nist_p384_a,
.b = nist_p384_b
};
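With the table in place, a consumer can size its buffers from the curve descriptor. A sketch, assuming the ecc_get_curve() accessor and ECC_CURVE_NIST_P384 id added elsewhere in this series:

	#include "ecc.h"

	/* 6 digits x 8 bytes = 48-byte coordinates for P-384 */
	static unsigned int p384_coord_bytes(void)
	{
		const struct ecc_curve *c = ecc_get_curve(ECC_CURVE_NIST_P384);

		return c ? c->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT : 0;
	}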
/* curve25519 */
static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000 };
static u64 curve25519_p[] = { 0xffffffffffffffed, 0xffffffffffffffff,
0xffffffffffffffff, 0x7fffffffffffffff };
static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000,
0x0000000000000000, 0x0000000000000000 };
static const struct ecc_curve ecc_25519 = {
.name = "curve25519",
.g = {
.x = curve25519_g_x,
.ndigits = 4,
},
.p = curve25519_p,
.a = curve25519_a,
};
#endif #endif


@ -23,33 +23,16 @@ static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
return kpp_tfm_ctx(tfm); return kpp_tfm_ctx(tfm);
} }
static unsigned int ecdh_supported_curve(unsigned int curve_id)
{
switch (curve_id) {
case ECC_CURVE_NIST_P192: return ECC_CURVE_NIST_P192_DIGITS;
case ECC_CURVE_NIST_P256: return ECC_CURVE_NIST_P256_DIGITS;
default: return 0;
}
}
static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len) unsigned int len)
{ {
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
struct ecdh params; struct ecdh params;
unsigned int ndigits;
if (crypto_ecdh_decode_key(buf, len, &params) < 0 || if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
params.key_size > sizeof(ctx->private_key)) params.key_size > sizeof(u64) * ctx->ndigits)
return -EINVAL; return -EINVAL;
ndigits = ecdh_supported_curve(params.curve_id);
if (!ndigits)
return -EINVAL;
ctx->curve_id = params.curve_id;
ctx->ndigits = ndigits;
if (!params.key || !params.key_size) if (!params.key || !params.key_size)
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key); ctx->private_key);
@ -140,13 +123,24 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1); return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
} }
static struct kpp_alg ecdh = { static int ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P192;
ctx->ndigits = ECC_CURVE_NIST_P192_DIGITS;
return 0;
}
static struct kpp_alg ecdh_nist_p192 = {
.set_secret = ecdh_set_secret, .set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value, .generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value, .compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size, .max_size = ecdh_max_size,
.init = ecdh_nist_p192_init_tfm,
.base = { .base = {
.cra_name = "ecdh", .cra_name = "ecdh-nist-p192",
.cra_driver_name = "ecdh-generic", .cra_driver_name = "ecdh-generic",
.cra_priority = 100, .cra_priority = 100,
.cra_module = THIS_MODULE, .cra_module = THIS_MODULE,
@ -154,14 +148,48 @@ static struct kpp_alg ecdh = {
}, },
}; };
static int ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->ndigits = ECC_CURVE_NIST_P256_DIGITS;
return 0;
}
static struct kpp_alg ecdh_nist_p256 = {
.set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size,
.init = ecdh_nist_p256_init_tfm,
.base = {
.cra_name = "ecdh-nist-p256",
.cra_driver_name = "ecdh-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecdh_ctx),
},
};
static bool ecdh_nist_p192_registered;
static int ecdh_init(void) static int ecdh_init(void)
{ {
return crypto_register_kpp(&ecdh); int ret;
ret = crypto_register_kpp(&ecdh_nist_p192);
ecdh_nist_p192_registered = ret == 0;
return crypto_register_kpp(&ecdh_nist_p256);
} }
static void ecdh_exit(void) static void ecdh_exit(void)
{ {
crypto_unregister_kpp(&ecdh); if (ecdh_nist_p192_registered)
crypto_unregister_kpp(&ecdh_nist_p192);
crypto_unregister_kpp(&ecdh_nist_p256);
} }
subsys_initcall(ecdh_init); subsys_initcall(ecdh_init);
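Since the curve id is gone from the packed secret, the curve is now selected by algorithm name alone. A minimal user-side sketch:

	#include <crypto/kpp.h>

	/* The curve is part of the cra_name; no curve_id handshake needed. */
	static struct crypto_kpp *demo_alloc_p256(void)
	{
		return crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
	}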


@ -10,7 +10,7 @@
#include <crypto/ecdh.h> #include <crypto/ecdh.h>
#include <crypto/kpp.h> #include <crypto/kpp.h>
#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short)) #define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + sizeof(short))
static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz) static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
{ {
@ -46,7 +46,6 @@ int crypto_ecdh_encode_key(char *buf, unsigned int len,
return -EINVAL; return -EINVAL;
ptr = ecdh_pack_data(ptr, &secret, sizeof(secret)); ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
ptr = ecdh_pack_data(ptr, &params->curve_id, sizeof(params->curve_id));
ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size)); ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
ecdh_pack_data(ptr, params->key, params->key_size); ecdh_pack_data(ptr, params->key, params->key_size);
@ -70,7 +69,6 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
if (unlikely(len < secret.len)) if (unlikely(len < secret.len))
return -EINVAL; return -EINVAL;
ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size)); ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
if (secret.len != crypto_ecdh_key_len(params)) if (secret.len != crypto_ecdh_key_len(params))
return -EINVAL; return -EINVAL;
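The packed secret thus shrinks by one short: kpp_secret header, then key_size, then the raw key. A sketch of encoding under the new layout (buffer sizing left to the caller):

	#include <crypto/ecdh.h>

	/* curve_id no longer appears in struct ecdh or in the packed blob */
	static int demo_pack_key(char *buf, unsigned int len,
				 char *key, unsigned short key_size)
	{
		struct ecdh p = {
			.key		= key,
			.key_size	= key_size,
		};

		return crypto_ecdh_encode_key(buf, len, &p);
	}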

crypto/ecdsa.c (new file, 376 lines)

@ -0,0 +1,376 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (c) 2021 IBM Corporation
*/
#include <linux/module.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/ecdh.h>
#include <linux/asn1_decoder.h>
#include <linux/scatterlist.h>
#include "ecc.h"
#include "ecdsasignature.asn1.h"
struct ecc_ctx {
unsigned int curve_id;
const struct ecc_curve *curve;
bool pub_key_set;
u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */
u64 y[ECC_MAX_DIGITS];
struct ecc_point pub_key;
};
struct ecdsa_signature_ctx {
const struct ecc_curve *curve;
u64 r[ECC_MAX_DIGITS];
u64 s[ECC_MAX_DIGITS];
};
/*
* Get the r and s components of a signature from the X509 certificate.
*/
static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen, unsigned int ndigits)
{
size_t keylen = ndigits * sizeof(u64);
ssize_t diff = vlen - keylen;
const char *d = value;
u8 rs[ECC_MAX_BYTES];
if (!value || !vlen)
return -EINVAL;
/* diff = 0: 'value' has exactly the right size
* diff > 0: 'value' has too many bytes; one leading zero is allowed that
* makes the value a positive integer; error on more
* diff < 0: 'value' is missing leading zeros, which we add
*/
if (diff > 0) {
/* skip over leading zeros that make 'value' a positive int */
if (*d == 0) {
vlen -= 1;
diff--;
d++;
}
if (diff)
return -EINVAL;
}
if (-diff >= keylen)
return -EINVAL;
if (diff) {
/* leading zeros not given in 'value' */
memset(rs, 0, -diff);
}
memcpy(&rs[-diff], d, vlen);
ecc_swap_digits((u64 *)rs, dest, ndigits);
return 0;
}
int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecdsa_signature_ctx *sig = context;
return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen,
sig->curve->g.ndigits);
}
int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
struct ecdsa_signature_ctx *sig = context;
return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen,
sig->curve->g.ndigits);
}
static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s)
{
const struct ecc_curve *curve = ctx->curve;
unsigned int ndigits = curve->g.ndigits;
u64 s1[ECC_MAX_DIGITS];
u64 u1[ECC_MAX_DIGITS];
u64 u2[ECC_MAX_DIGITS];
u64 x1[ECC_MAX_DIGITS];
u64 y1[ECC_MAX_DIGITS];
struct ecc_point res = ECC_POINT_INIT(x1, y1, ndigits);
/* 0 < r < n and 0 < s < n */
if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 ||
vli_is_zero(s, ndigits) || vli_cmp(s, curve->n, ndigits) >= 0)
return -EBADMSG;
/* hash is given */
pr_devel("hash : %016llx %016llx ... %016llx\n",
hash[ndigits - 1], hash[ndigits - 2], hash[0]);
/* s1 = (s^-1) mod n */
vli_mod_inv(s1, s, curve->n, ndigits);
/* u1 = (hash * s1) mod n */
vli_mod_mult_slow(u1, hash, s1, curve->n, ndigits);
/* u2 = (r * s1) mod n */
vli_mod_mult_slow(u2, r, s1, curve->n, ndigits);
/* res = u1*G + u2 * pub_key */
ecc_point_mult_shamir(&res, u1, &curve->g, u2, &ctx->pub_key, curve);
/* res.x = res.x mod n (if res.x > order) */
if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1))
/* faster alternative for NIST p384, p256 & p192 */
vli_sub(res.x, res.x, curve->n, ndigits);
if (!vli_cmp(res.x, r, ndigits))
return 0;
return -EKEYREJECTED;
}
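For reference, _ecdsa_verify() above is the textbook check; in the usual notation with hash h, public key Q_A, generator G and group order n:

	\[
	s_1 = s^{-1} \bmod n,\qquad
	u_1 = h\,s_1 \bmod n,\qquad
	u_2 = r\,s_1 \bmod n,
	\]
	\[
	(x_1, y_1) = u_1 G + u_2 Q_A,\qquad
	\text{accept iff } r \equiv x_1 \pmod{n}.
	\]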
/*
* Verify an ECDSA signature.
*/
static int ecdsa_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
struct ecdsa_signature_ctx sig_ctx = {
.curve = ctx->curve,
};
u8 rawhash[ECC_MAX_BYTES];
u64 hash[ECC_MAX_DIGITS];
unsigned char *buffer;
ssize_t diff;
int ret;
if (unlikely(!ctx->pub_key_set))
return -EINVAL;
buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src, req->src_len + req->dst_len),
buffer, req->src_len + req->dst_len, 0);
ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx,
buffer, req->src_len);
if (ret < 0)
goto error;
/* if the hash is shorter than keylen, add leading zeros to fit ndigits */
diff = keylen - req->dst_len;
if (diff >= 0) {
if (diff)
memset(rawhash, 0, diff);
memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
} else if (diff < 0) {
/* given hash is longer, we take the left-most bytes */
memcpy(&rawhash, buffer + req->src_len, keylen);
}
ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
error:
kfree(buffer);
return ret;
}
static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id)
{
ctx->curve_id = curve_id;
ctx->curve = ecc_get_curve(curve_id);
if (!ctx->curve)
return -EINVAL;
return 0;
}
static void ecdsa_ecc_ctx_deinit(struct ecc_ctx *ctx)
{
ctx->pub_key_set = false;
}
static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
{
unsigned int curve_id = ctx->curve_id;
int ret;
ecdsa_ecc_ctx_deinit(ctx);
ret = ecdsa_ecc_ctx_init(ctx, curve_id);
if (ret == 0)
ctx->pub_key = ECC_POINT_INIT(ctx->x, ctx->y,
ctx->curve->g.ndigits);
return ret;
}
/*
* Set the public key given the raw uncompressed key data from an X509
* certificate. The key data contain the concatenated X and Y coordinates of
* the public key.
*/
static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
const unsigned char *d = key;
const u64 *digits = (const u64 *)&d[1];
unsigned int ndigits;
int ret;
ret = ecdsa_ecc_ctx_reset(ctx);
if (ret < 0)
return ret;
if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0)
return -EINVAL;
/* we only accept uncompressed format indicated by '4' */
if (d[0] != 4)
return -EINVAL;
keylen--;
ndigits = (keylen >> 1) / sizeof(u64);
if (ndigits != ctx->curve->g.ndigits)
return -EINVAL;
ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
ctx->pub_key_set = ret == 0;
return ret;
}
static void ecdsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
ecdsa_ecc_ctx_deinit(ctx);
}
static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
}
static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384);
}
static struct akcipher_alg ecdsa_nist_p384 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.init = ecdsa_nist_p384_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
.cra_name = "ecdsa-nist-p384",
.cra_driver_name = "ecdsa-nist-p384-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecc_ctx),
},
};
static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256);
}
static struct akcipher_alg ecdsa_nist_p256 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.init = ecdsa_nist_p256_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
.cra_name = "ecdsa-nist-p256",
.cra_driver_name = "ecdsa-nist-p256-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecc_ctx),
},
};
static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm)
{
struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192);
}
static struct akcipher_alg ecdsa_nist_p192 = {
.verify = ecdsa_verify,
.set_pub_key = ecdsa_set_pub_key,
.max_size = ecdsa_max_size,
.init = ecdsa_nist_p192_init_tfm,
.exit = ecdsa_exit_tfm,
.base = {
.cra_name = "ecdsa-nist-p192",
.cra_driver_name = "ecdsa-nist-p192-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecc_ctx),
},
};
static bool ecdsa_nist_p192_registered;
static int ecdsa_init(void)
{
int ret;
/* NIST p192 may not be available in FIPS mode */
ret = crypto_register_akcipher(&ecdsa_nist_p192);
ecdsa_nist_p192_registered = ret == 0;
ret = crypto_register_akcipher(&ecdsa_nist_p256);
if (ret)
goto nist_p256_error;
ret = crypto_register_akcipher(&ecdsa_nist_p384);
if (ret)
goto nist_p384_error;
return 0;
nist_p384_error:
crypto_unregister_akcipher(&ecdsa_nist_p256);
nist_p256_error:
if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192);
return ret;
}
static void ecdsa_exit(void)
{
if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192);
crypto_unregister_akcipher(&ecdsa_nist_p256);
crypto_unregister_akcipher(&ecdsa_nist_p384);
}
subsys_initcall(ecdsa_init);
module_exit(ecdsa_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
MODULE_DESCRIPTION("ECDSA generic algorithm");
MODULE_ALIAS_CRYPTO("ecdsa-generic");
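A rough user-side sketch, not a tested driver: allocate "ecdsa-nist-p256", load the raw public key, then hand over signature and digest the way ecdsa_verify() above consumes them (signature first, digest appended). Async completion and most error paths are omitted; all names below are hypothetical:

	#include <crypto/akcipher.h>
	#include <linux/scatterlist.h>

	static int demo_verify(const u8 *sig, unsigned int sig_len,
			       const u8 *digest, unsigned int digest_len,
			       const u8 *pub, unsigned int pub_len)
	{
		struct crypto_akcipher *tfm;
		struct akcipher_request *req;
		struct scatterlist src[2];
		int ret;

		tfm = crypto_alloc_akcipher("ecdsa-nist-p256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_akcipher_set_pub_key(tfm, pub, pub_len);
		if (ret)
			goto out;

		req = akcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}

		/* signature first, digest appended, per ecdsa_verify() above */
		sg_init_table(src, 2);
		sg_set_buf(&src[0], sig, sig_len);
		sg_set_buf(&src[1], digest, digest_len);
		akcipher_request_set_crypt(req, src, NULL, sig_len, digest_len);

		ret = crypto_akcipher_verify(req);
		akcipher_request_free(req);
	out:
		crypto_free_akcipher(tfm);
		return ret;
	}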


@ -0,0 +1,4 @@
ECDSASignature ::= SEQUENCE {
r INTEGER ({ ecdsa_get_signature_r }),
s INTEGER ({ ecdsa_get_signature_s })
}
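The template corresponds to the standard DER layout, sketched here as a comment for orientation (lengths and payload bytes illustrative):

	/*
	 *   30 <len>            SEQUENCE
	 *      02 <len> <r...>  INTEGER r  (big endian, minimal length,
	 *                       one leading 0x00 if the high bit is set)
	 *      02 <len> <s...>  INTEGER s
	 *
	 * The decoder hands the two INTEGER payloads to
	 * ecdsa_get_signature_r()/_s() above.
	 */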


@ -63,10 +63,7 @@ do { \
} while (0) } while (0)
/* Rotate right one 64 bit number as a 56 bit number */ /* Rotate right one 64 bit number as a 56 bit number */
#define ror56_64(k, n) \ #define ror56_64(k, n) (k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)))
do { \
k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \
} while (0)
/* /*
* Sboxes for Feistel network derived from * Sboxes for Feistel network derived from


@ -597,7 +597,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data,
if (!ec) if (!ec)
return -1; return -1;
while (0 < len) { while (len > 0) {
unsigned int tocopy; unsigned int tocopy;
jent_gen_entropy(ec); jent_gen_entropy(ec);
@ -678,7 +678,7 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
} }
/* verify and set the oversampling rate */ /* verify and set the oversampling rate */
if (0 == osr) if (osr == 0)
osr = 1; /* minimum sampling rate is 1 */ osr = 1; /* minimum sampling rate is 1 */
entropy_collector->osr = osr; entropy_collector->osr = osr;
@ -769,7 +769,7 @@ int jent_entropy_init(void)
* etc. with the goal to clear it to get the worst case * etc. with the goal to clear it to get the worst case
* measurements. * measurements.
*/ */
if (CLEARCACHE > i) if (i < CLEARCACHE)
continue; continue;
if (stuck) if (stuck)
@ -826,7 +826,7 @@ int jent_entropy_init(void)
* should not fail. The value of 3 should cover the NTP case being * should not fail. The value of 3 should cover the NTP case being
* performed during our test run. * performed during our test run.
*/ */
if (3 < time_backwards) if (time_backwards > 3)
return JENT_ENOMONOTONIC; return JENT_ENOMONOTONIC;
/* /*


@ -114,9 +114,9 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk,
scatterwalk_start(walk, sg); scatterwalk_start(walk, sg);
scatterwalk_advance(walk, skip); scatterwalk_advance(walk, skip);
break; break;
} else }
skip -= sg->length;
skip -= sg->length;
sg = sg_next(sg); sg = sg_next(sg);
} }
} }


@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
u8 *buf = NULL; u8 *buf = NULL;
int err; int err;
crypto_stats_get(alg);
if (!seed && slen) { if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL); buf = kmalloc(slen, GFP_KERNEL);
if (!buf) { if (!buf)
crypto_alg_put(alg);
return -ENOMEM; return -ENOMEM;
}
err = get_random_bytes_wait(buf, slen); err = get_random_bytes_wait(buf, slen);
if (err) { if (err)
crypto_alg_put(alg);
goto out; goto out;
}
seed = buf; seed = buf;
} }
crypto_stats_get(alg);
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
crypto_stats_rng_seed(alg, err); crypto_stats_rng_seed(alg, err);
out: out:


@ -272,6 +272,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
u32 *k = ctx->expkey; u32 *k = ctx->expkey;
u8 *k8 = (u8 *)k; u8 *k8 = (u8 *)k;
u32 r0, r1, r2, r3, r4; u32 r0, r1, r2, r3, r4;
__le32 *lk;
int i; int i;
/* Copy key, add padding */ /* Copy key, add padding */
@ -283,22 +284,32 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
while (i < SERPENT_MAX_KEY_SIZE) while (i < SERPENT_MAX_KEY_SIZE)
k8[i++] = 0; k8[i++] = 0;
lk = (__le32 *)k;
k[0] = le32_to_cpu(lk[0]);
k[1] = le32_to_cpu(lk[1]);
k[2] = le32_to_cpu(lk[2]);
k[3] = le32_to_cpu(lk[3]);
k[4] = le32_to_cpu(lk[4]);
k[5] = le32_to_cpu(lk[5]);
k[6] = le32_to_cpu(lk[6]);
k[7] = le32_to_cpu(lk[7]);
/* Expand key using polynomial */ /* Expand key using polynomial */
r0 = le32_to_cpu(k[3]); r0 = k[3];
r1 = le32_to_cpu(k[4]); r1 = k[4];
r2 = le32_to_cpu(k[5]); r2 = k[5];
r3 = le32_to_cpu(k[6]); r3 = k[6];
r4 = le32_to_cpu(k[7]); r4 = k[7];
keyiter(le32_to_cpu(k[0]), r0, r4, r2, 0, 0); keyiter(k[0], r0, r4, r2, 0, 0);
keyiter(le32_to_cpu(k[1]), r1, r0, r3, 1, 1); keyiter(k[1], r1, r0, r3, 1, 1);
keyiter(le32_to_cpu(k[2]), r2, r1, r4, 2, 2); keyiter(k[2], r2, r1, r4, 2, 2);
keyiter(le32_to_cpu(k[3]), r3, r2, r0, 3, 3); keyiter(k[3], r3, r2, r0, 3, 3);
keyiter(le32_to_cpu(k[4]), r4, r3, r1, 4, 4); keyiter(k[4], r4, r3, r1, 4, 4);
keyiter(le32_to_cpu(k[5]), r0, r4, r2, 5, 5); keyiter(k[5], r0, r4, r2, 5, 5);
keyiter(le32_to_cpu(k[6]), r1, r0, r3, 6, 6); keyiter(k[6], r1, r0, r3, 6, 6);
keyiter(le32_to_cpu(k[7]), r2, r1, r4, 7, 7); keyiter(k[7], r2, r1, r4, 7, 7);
keyiter(k[0], r3, r2, r0, 8, 8); keyiter(k[0], r3, r2, r0, 8, 8);
keyiter(k[1], r4, r3, r1, 9, 9); keyiter(k[1], r4, r3, r1, 9, 9);
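The rework above converts the padded key once instead of calling le32_to_cpu() inside every keyiter(). The idiom in isolation (a toy helper, not the serpent code itself):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* One pass over the little-endian key words, then native indexing. */
	static void demo_load_key_words(u32 k[8], const void *key8)
	{
		const __le32 *lk = key8;
		int i;

		for (i = 0; i < 8; i++)
			k[i] = le32_to_cpu(lk[i]);
	}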


@ -1168,11 +1168,6 @@ static inline int check_shash_op(const char *op, int err,
return err; return err;
} }
static inline const void *sg_data(struct scatterlist *sg)
{
return page_address(sg_page(sg)) + sg->offset;
}
/* Test one hash test vector in one configuration, using the shash API */ /* Test one hash test vector in one configuration, using the shash API */
static int test_shash_vec_cfg(const struct hash_testvec *vec, static int test_shash_vec_cfg(const struct hash_testvec *vec,
const char *vec_name, const char *vec_name,
@ -1230,7 +1225,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
return 0; return 0;
if (cfg->nosimd) if (cfg->nosimd)
crypto_disable_simd_for_test(); crypto_disable_simd_for_test();
err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]), err = crypto_shash_digest(desc, sg_virt(&tsgl->sgl[0]),
tsgl->sgl[0].length, result); tsgl->sgl[0].length, result);
if (cfg->nosimd) if (cfg->nosimd)
crypto_reenable_simd_for_test(); crypto_reenable_simd_for_test();
@ -1266,7 +1261,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
cfg->finalization_type == FINALIZATION_TYPE_FINUP) { cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
if (divs[i]->nosimd) if (divs[i]->nosimd)
crypto_disable_simd_for_test(); crypto_disable_simd_for_test();
err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]), err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]),
tsgl->sgl[i].length, result); tsgl->sgl[i].length, result);
if (divs[i]->nosimd) if (divs[i]->nosimd)
crypto_reenable_simd_for_test(); crypto_reenable_simd_for_test();
@ -1278,7 +1273,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec,
} }
if (divs[i]->nosimd) if (divs[i]->nosimd)
crypto_disable_simd_for_test(); crypto_disable_simd_for_test();
err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]), err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]),
tsgl->sgl[i].length); tsgl->sgl[i].length);
if (divs[i]->nosimd) if (divs[i]->nosimd)
crypto_reenable_simd_for_test(); crypto_reenable_simd_for_test();
@ -4904,11 +4899,38 @@ static const struct alg_test_desc alg_test_descs[] = {
} }
}, { }, {
#endif #endif
.alg = "ecdh", #ifndef CONFIG_CRYPTO_FIPS
.alg = "ecdh-nist-p192",
.test = alg_test_kpp, .test = alg_test_kpp,
.fips_allowed = 1, .fips_allowed = 1,
.suite = { .suite = {
.kpp = __VECS(ecdh_tv_template) .kpp = __VECS(ecdh_p192_tv_template)
}
}, {
#endif
.alg = "ecdh-nist-p256",
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
.kpp = __VECS(ecdh_p256_tv_template)
}
}, {
.alg = "ecdsa-nist-p192",
.test = alg_test_akcipher,
.suite = {
.akcipher = __VECS(ecdsa_nist_p192_tv_template)
}
}, {
.alg = "ecdsa-nist-p256",
.test = alg_test_akcipher,
.suite = {
.akcipher = __VECS(ecdsa_nist_p256_tv_template)
}
}, {
.alg = "ecdsa-nist-p384",
.test = alg_test_akcipher,
.suite = {
.akcipher = __VECS(ecdsa_nist_p384_tv_template)
} }
}, { }, {
.alg = "ecrdsa", .alg = "ecrdsa",


@ -566,6 +566,430 @@ static const struct akcipher_testvec rsa_tv_template[] = {
} }
}; };
/*
* ECDSA test vectors.
*/
static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
{
.key =
"\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
"\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
"\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
"\x98",
.key_len = 49,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x01",
.param_len = 21,
.m =
"\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
"\x63\x85\xe7\x82",
.m_size = 20,
.algo = OID_id_ecdsa_with_sha1,
.c =
"\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
"\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
"\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86"
"\x80\x6f\xa5\x79\x77\xda\xd0",
.c_size = 55,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key =
"\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
"\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
"\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
"\xa3",
.key_len = 49,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x01",
.param_len = 21,
.m =
"\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40"
"\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11",
.m_size = 28,
.algo = OID_id_ecdsa_with_sha224,
.c =
"\x30\x34\x02\x18\x5a\x8b\x82\x69\x7e\x8a\x0a\x09\x14\xf8\x11\x2b"
"\x55\xdc\xae\x37\x83\x7b\x12\xe6\xb6\x5b\xcb\xd4\x02\x18\x6a\x14"
"\x4f\x53\x75\xc8\x02\x48\xeb\xc3\x92\x0f\x1e\x72\xee\xc4\xa3\xe3"
"\x5c\x99\xdb\x92\x5b\x36",
.c_size = 54,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key =
"\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae"
"\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56"
"\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58"
"\x91",
.key_len = 49,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x01",
.param_len = 21,
.m =
"\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd"
"\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7",
.m_size = 32,
.algo = OID_id_ecdsa_with_sha256,
.c =
"\x30\x35\x02\x18\x3f\x72\x3f\x1f\x42\xd2\x3f\x1d\x6b\x1a\x58\x56"
"\xf1\x8f\xf7\xfd\x01\x48\xfb\x5f\x72\x2a\xd4\x8f\x02\x19\x00\xb3"
"\x69\x43\xfd\x48\x19\x86\xcf\x32\xdd\x41\x74\x6a\x51\xc7\xd9\x7d"
"\x3a\x97\xd9\xcd\x1a\x6a\x49",
.c_size = 55,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key =
"\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7"
"\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f"
"\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e"
"\x8b",
.key_len = 49,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x01",
.param_len = 21,
.m =
"\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9"
"\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61"
"\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78",
.m_size = 48,
.algo = OID_id_ecdsa_with_sha384,
.c =
"\x30\x35\x02\x19\x00\xf0\xa3\x38\xce\x2b\xf8\x9d\x1a\xcf\x7f\x34"
"\xb4\xb4\xe5\xc5\x00\xdd\x15\xbb\xd6\x8c\xa7\x03\x78\x02\x18\x64"
"\xbc\x5a\x1f\x82\x96\x61\xd7\xd1\x01\x77\x44\x5d\x53\xa4\x7c\x93"
"\x12\x3b\x3b\x28\xfb\x6d\xe1",
.c_size = 55,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key =
"\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea"
"\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d"
"\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11"
"\x57",
.key_len = 49,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x01",
.param_len = 21,
.m =
"\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23"
"\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67"
"\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70"
"\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5",
.m_size = 64,
.algo = OID_id_ecdsa_with_sha512,
.c =
"\x30\x35\x02\x19\x00\x88\x5b\x8f\x59\x43\xbf\xcf\xc6\xdd\x3f\x07"
"\x87\x12\xa0\xd4\xac\x2b\x11\x2d\x1c\xb6\x06\xc9\x6c\x02\x18\x73"
"\xb4\x22\x9a\x98\x73\x3c\x83\xa9\x14\x2a\x5e\xf5\xe5\xfb\x72\x28"
"\x6a\xdf\x97\xfd\x82\x76\x24",
.c_size = 55,
.public_key_vec = true,
.siggen_sigver_test = true,
},
};
static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
{
.key =
"\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
"\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
"\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
"\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
"\xaf",
.key_len = 65,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x07",
.param_len = 21,
.m =
"\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
"\x0b\xde\x6a\x42",
.m_size = 20,
.algo = OID_id_ecdsa_with_sha1,
.c =
"\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
"\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
"\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d"
"\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7"
"\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
.c_size = 72,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key =
"\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
"\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
"\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0"
"\xd4",
.key_len = 65,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x07",
.param_len = 21,
.m =
"\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41"
"\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0",
.m_size = 28,
.algo = OID_id_ecdsa_with_sha224,
.c =
"\x30\x44\x02\x20\x20\x43\xfa\xc0\x9f\x9d\x7b\xe7\xae\xce\x77\x59"
"\x1a\xdb\x59\xd5\x34\x62\x79\xcb\x6a\x91\x67\x2e\x7d\x25\xd8\x25"
"\xf5\x81\xd2\x1e\x02\x20\x5f\xf8\x74\xf8\x57\xd0\x5e\x54\x76\x20"
"\x4a\x77\x22\xec\xc8\x66\xbf\x50\x05\x58\x39\x0e\x26\x92\xce\xd5"
"\x2e\x8b\xde\x5a\x04\x0e",
.c_size = 70,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key =
"\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f"
"\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00"
"\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9"
"\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e"
"\xb8",
.key_len = 65,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x07",
.param_len = 21,
.m =
"\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b"
"\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4",
.m_size = 32,
.algo = OID_id_ecdsa_with_sha256,
.c =
"\x30\x45\x02\x20\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63"
"\xa8\x1a\xad\xfc\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67"
"\xdc\x0c\xd0\x82\x02\x21\x00\xbd\xff\xce\xee\x42\xc3\x97\xff\xf9"
"\xa9\x81\xac\x4a\x50\xd0\x91\x0a\x6e\x1b\xc4\xaf\xe1\x83\xc3\x4f"
"\x2a\x65\x35\x23\xe3\x1d\xfa",
.c_size = 71,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key =
"\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d"
"\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e"
"\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00"
"\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2"
"\x7c",
.key_len = 65,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x07",
.param_len = 21,
.m =
"\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6"
"\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94"
"\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb",
.m_size = 48,
.algo = OID_id_ecdsa_with_sha384,
.c =
"\x30\x46\x02\x21\x00\x8e\xf3\x6f\xdc\xf8\x69\xa6\x2e\xd0\x2e\x95"
"\x54\xd1\x95\x64\x93\x08\xb2\x6b\x24\x94\x48\x46\x5e\xf2\xe4\x6c"
"\xc7\x94\xb1\xd5\xfe\x02\x21\x00\xeb\xa7\x80\x26\xdc\xf9\x3a\x44"
"\x19\xfb\x5f\x92\xf4\xc9\x23\x37\x69\xf4\x3b\x4f\x47\xcf\x9b\x16"
"\xc0\x60\x11\x92\xdc\x17\x89\x12",
.c_size = 72,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key =
"\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a"
"\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38"
"\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b"
"\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92"
"\xbf",
.key_len = 65,
.params =
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
"\xce\x3d\x03\x01\x07",
.param_len = 21,
.m =
"\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9"
"\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca"
"\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf"
"\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6",
.m_size = 64,
.algo = OID_id_ecdsa_with_sha512,
.c =
"\x30\x45\x02\x21\x00\xb8\x6d\x87\x81\x43\xdf\xfb\x9f\x40\xea\x44"
"\x81\x00\x4e\x29\x08\xed\x8c\x73\x30\x6c\x22\xb3\x97\x76\xf6\x04"
"\x99\x09\x37\x4d\xfa\x02\x20\x1e\xb9\x75\x31\xf6\x04\xa5\x4d\xf8"
"\x00\xdd\xab\xd4\xc0\x2b\xe6\x5c\xad\xc3\x78\x1c\xc2\xc1\x19\x76"
"\x31\x79\x4a\xe9\x81\x6a\xee",
.c_size = 71,
.public_key_vec = true,
.siggen_sigver_test = true,
},
};
static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
{
.key = /* secp384r1(sha1) */
"\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
"\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
"\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
"\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
"\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
"\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
"\xf1",
.key_len = 97,
.params =
"\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
"\x00\x22",
.param_len = 18,
.m =
"\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
"\x3a\x69\xc1\x93",
.m_size = 20,
.algo = OID_id_ecdsa_with_sha1,
.c =
"\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
"\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
"\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e"
"\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0"
"\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88"
"\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26"
"\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
.c_size = 104,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key = /* secp384r1(sha224) */
"\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
"\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
"\x3e\xe8\x29\x6e\xf6\xe4\xb5\x8e\xc7\x4a\xc2\x5f\x37\x13\x99\x05"
"\xb6\xa4\x9d\xf9\xfb\x79\x41\xe7\xd7\x96\x9f\x73\x3b\x39\x43\xdc"
"\xda\xf4\x06\xb9\xa5\x29\x01\x9d\x3b\xe1\xd8\x68\x77\x2a\xf4\x50"
"\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d"
"\xe0",
.key_len = 97,
.params =
"\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
"\x00\x22",
.param_len = 18,
.m =
"\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd"
"\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5",
.m_size = 28,
.algo = OID_id_ecdsa_with_sha224,
.c =
"\x30\x66\x02\x31\x00\x8a\x51\x84\xce\x13\x1e\xd2\xdc\xec\xcb\xe4"
"\x89\x47\xb2\xf7\xbc\x97\xf1\xc8\x72\x26\xcf\x5a\x5e\xc5\xda\xb4"
"\xe3\x93\x07\xe0\x99\xc9\x9c\x11\xb8\x10\x01\xc5\x41\x3f\xdd\x15"
"\x1b\x68\x2b\x9d\x8b\x02\x31\x00\x8b\x03\x2c\xfc\x1f\xd1\xa9\xa4"
"\x4b\x00\x08\x31\x6c\xf5\xd5\xf6\xdf\xd8\x68\xa2\x64\x42\x65\xf3"
"\x4d\xd0\xc6\x6e\xb0\xe9\xfc\x14\x9f\x19\xd0\x42\x8b\x93\xc2\x11"
"\x88\x2b\x82\x26\x5e\x1c\xda\xfb",
.c_size = 104,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key = /* secp384r1(sha256) */
"\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a"
"\x1f\x52\x00\x63\x78\xf1\xa9\xfd\x75\x4c\x9e\xb2\x20\x1a\x91\x5a"
"\xba\x7a\xa3\xe5\x6c\xb6\x25\x68\x4b\xe8\x13\xa6\x54\x87\x2c\x0e"
"\xd0\x83\x95\xbc\xbf\xc5\x28\x4f\x77\x1c\x46\xa6\xf0\xbc\xd4\xa4"
"\x8d\xc2\x8f\xb3\x32\x37\x40\xd6\xca\xf8\xae\x07\x34\x52\x39\x52"
"\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc"
"\xab",
.key_len = 97,
.params =
"\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
"\x00\x22",
.param_len = 18,
.m =
"\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3"
"\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2",
.m_size = 32,
.algo = OID_id_ecdsa_with_sha256,
.c =
"\x30\x64\x02\x30\x08\x09\x12\x9d\x6e\x96\x64\xa6\x8e\x3f\x7e\xce"
"\x0a\x9b\xaa\x59\xcc\x47\x53\x87\xbc\xbd\x83\x3f\xaf\x06\x3f\x84"
"\x04\xe2\xf9\x67\xb6\xc6\xfc\x70\x2e\x66\x3c\x77\xc8\x8d\x2c\x79"
"\x3a\x8e\x32\xc4\x02\x30\x40\x34\xb8\x90\xa9\x80\xab\x47\x26\xa2"
"\xb0\x89\x42\x0a\xda\xd9\xdd\xce\xbc\xb2\x97\xf4\x9c\xf3\x15\x68"
"\xc0\x75\x3e\x23\x5e\x36\x4f\x8d\xde\x1e\x93\x8d\x95\xbb\x10\x0e"
"\xf4\x1f\x39\xca\x4d\x43",
.c_size = 102,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key = /* secp384r1(sha384) */
"\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f"
"\x74\x16\x43\xe9\x1a\x25\x7c\x55\xff\xf0\x29\x68\x66\x20\x91\xf9"
"\xdb\x2b\xf6\xb3\x6c\x54\x01\xca\xc7\x6a\x5c\x0d\xeb\x68\xd9\x3c"
"\xf1\x01\x74\x1f\xf9\x6c\xe5\x5b\x60\xe9\x7f\x5d\xb3\x12\x80\x2a"
"\xd8\x67\x92\xc9\x0e\x4c\x4c\x6b\xa1\xb2\xa8\x1e\xac\x1c\x97\xd9"
"\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89"
"\x9e",
.key_len = 97,
.params =
"\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
"\x00\x22",
.param_len = 18,
.m =
"\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf"
"\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1"
"\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff",
.m_size = 48,
.algo = OID_id_ecdsa_with_sha384,
.c =
"\x30\x66\x02\x31\x00\x9b\x28\x68\xc0\xa1\xea\x8c\x50\xee\x2e\x62"
"\x35\x46\xfa\x00\xd8\x2d\x7a\x91\x5f\x49\x2d\x22\x08\x29\xe6\xfb"
"\xca\x8c\xd6\xb6\xb4\x3b\x1f\x07\x8f\x15\x02\xfe\x1d\xa2\xa4\xc8"
"\xf2\xea\x9d\x11\x1f\x02\x31\x00\xfc\x50\xf6\x43\xbd\x50\x82\x0e"
"\xbf\xe3\x75\x24\x49\xac\xfb\xc8\x71\xcd\x8f\x18\x99\xf0\x0f\x13"
"\x44\x92\x8c\x86\x99\x65\xb3\x97\x96\x17\x04\xc9\x05\x77\xf1\x8e"
"\xab\x8d\x4e\xde\xe6\x6d\x9b\x66",
.c_size = 104,
.public_key_vec = true,
.siggen_sigver_test = true,
}, {
.key = /* secp384r1(sha512) */
"\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46"
"\xcb\xf6\x05\xc2\xee\x73\x83\xbc\xea\x30\x61\x4d\x40\x05\x41\xf4"
"\x8c\xe3\x0e\x5c\xf0\x50\xf2\x07\x19\xe8\x4f\x25\xbe\xee\x0c\x95"
"\x54\x36\x86\xec\xc2\x20\x75\xf3\x89\xb5\x11\xa1\xb7\xf5\xaf\xbe"
"\x81\xe4\xc3\x39\x06\xbd\xe4\xfe\x68\x1c\x6d\x99\x2b\x1b\x63\xfa"
"\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac"
"\xa3",
.key_len = 97,
.params =
"\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
"\x00\x22",
.param_len = 18,
.m =
"\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1"
"\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71"
"\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc"
"\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e",
.m_size = 64,
.algo = OID_id_ecdsa_with_sha512,
.c =
"\x30\x63\x02\x2f\x1d\x20\x94\x77\xfe\x31\xfa\x4d\xc6\xef\xda\x02"
"\xe7\x0f\x52\x9a\x02\xde\x93\xe8\x83\xe4\x84\x4c\xfc\x6f\x80\xe3"
"\xaf\xb3\xd9\xdc\x2b\x43\x0e\x6a\xb3\x53\x6f\x3e\xb3\xc7\xa8\xb3"
"\x17\x77\xd1\x02\x30\x63\xf6\xf0\x3d\x5f\x5f\x99\x3f\xde\x3a\x3d"
"\x16\xaf\xb4\x52\x6a\xec\x63\xe3\x0c\xec\x50\xdc\xcc\xc4\x6a\x03"
"\x5f\x8d\x7a\xf9\xfb\x34\xe4\x8b\x80\xa5\xb6\xda\x2c\x4e\x45\xcf"
"\x3c\x93\xff\x50\x5d",
.c_size = 101,
.public_key_vec = true,
.siggen_sigver_test = true,
},
};
/* /*
* EC-RDSA test vectors are generated by gost-engine. * EC-RDSA test vectors are generated by gost-engine.
*/ */
@ -2261,19 +2685,17 @@ static const struct kpp_testvec curve25519_tv_template[] = {
} }
}; };
static const struct kpp_testvec ecdh_tv_template[] = {
{
#ifndef CONFIG_CRYPTO_FIPS #ifndef CONFIG_CRYPTO_FIPS
static const struct kpp_testvec ecdh_p192_tv_template[] = {
{
.secret = .secret =
#ifdef __LITTLE_ENDIAN #ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */ "\x02\x00" /* type */
"\x20\x00" /* len */ "\x1e\x00" /* len */
"\x01\x00" /* curve_id */
"\x18\x00" /* key_size */ "\x18\x00" /* key_size */
#else #else
"\x00\x02" /* type */ "\x00\x02" /* type */
"\x00\x20" /* len */ "\x00\x1e" /* len */
"\x00\x01" /* curve_id */
"\x00\x18" /* key_size */ "\x00\x18" /* key_size */
#endif #endif
"\xb5\x05\xb1\x71\x1e\xbf\x8c\xda" "\xb5\x05\xb1\x71\x1e\xbf\x8c\xda"
@ -2301,18 +2723,20 @@ static const struct kpp_testvec ecdh_tv_template[] = {
.b_public_size = 48, .b_public_size = 48,
.expected_a_public_size = 48, .expected_a_public_size = 48,
.expected_ss_size = 24 .expected_ss_size = 24
}, { }
};
#endif #endif
static const struct kpp_testvec ecdh_p256_tv_template[] = {
{
.secret = .secret =
#ifdef __LITTLE_ENDIAN #ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */ "\x02\x00" /* type */
"\x28\x00" /* len */ "\x26\x00" /* len */
"\x02\x00" /* curve_id */
"\x20\x00" /* key_size */ "\x20\x00" /* key_size */
#else #else
"\x00\x02" /* type */ "\x00\x02" /* type */
"\x00\x28" /* len */ "\x00\x26" /* len */
"\x00\x02" /* curve_id */
"\x00\x20" /* key_size */ "\x00\x20" /* key_size */
#endif #endif
"\x24\xd1\x21\xeb\xe5\xcf\x2d\x83" "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"
@ -2350,25 +2774,21 @@ static const struct kpp_testvec ecdh_tv_template[] = {
.secret = .secret =
#ifdef __LITTLE_ENDIAN #ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */ "\x02\x00" /* type */
"\x08\x00" /* len */ "\x06\x00" /* len */
"\x02\x00" /* curve_id */
"\x00\x00", /* key_size */ "\x00\x00", /* key_size */
#else #else
"\x00\x02" /* type */ "\x00\x02" /* type */
"\x00\x08" /* len */ "\x00\x06" /* len */
"\x00\x02" /* curve_id */
"\x00\x00", /* key_size */ "\x00\x00", /* key_size */
#endif #endif
.b_secret = .b_secret =
#ifdef __LITTLE_ENDIAN #ifdef __LITTLE_ENDIAN
"\x02\x00" /* type */ "\x02\x00" /* type */
"\x28\x00" /* len */ "\x26\x00" /* len */
"\x02\x00" /* curve_id */
"\x20\x00" /* key_size */ "\x20\x00" /* key_size */
#else #else
"\x00\x02" /* type */ "\x00\x02" /* type */
"\x00\x28" /* len */ "\x00\x26" /* len */
"\x00\x02" /* curve_id */
"\x00\x20" /* key_size */ "\x00\x20" /* key_size */
#endif #endif
"\x24\xd1\x21\xeb\xe5\xcf\x2d\x83" "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83"


@ -170,7 +170,6 @@ static int ba431_trng_init(struct hwrng *rng)
static int ba431_trng_probe(struct platform_device *pdev) static int ba431_trng_probe(struct platform_device *pdev)
{ {
struct ba431_trng *ba431; struct ba431_trng *ba431;
struct resource *res;
int ret; int ret;
ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL); ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL);
@ -179,8 +178,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
ba431->dev = &pdev->dev; ba431->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ba431->base = devm_platform_ioremap_resource(pdev, 0);
ba431->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ba431->base)) if (IS_ERR(ba431->base))
return PTR_ERR(ba431->base); return PTR_ERR(ba431->base);
@ -193,7 +191,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ba431); platform_set_drvdata(pdev, ba431);
ret = hwrng_register(&ba431->rng); ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
if (ret) { if (ret) {
dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret); dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
return ret; return ret;
@ -204,15 +202,6 @@ static int ba431_trng_probe(struct platform_device *pdev)
return 0; return 0;
} }
static int ba431_trng_remove(struct platform_device *pdev)
{
struct ba431_trng *ba431 = platform_get_drvdata(pdev);
hwrng_unregister(&ba431->rng);
return 0;
}
static const struct of_device_id ba431_trng_dt_ids[] = { static const struct of_device_id ba431_trng_dt_ids[] = {
{ .compatible = "silex-insight,ba431-rng", .data = NULL }, { .compatible = "silex-insight,ba431-rng", .data = NULL },
{ /* sentinel */ } { /* sentinel */ }
@ -225,7 +214,6 @@ static struct platform_driver ba431_trng_driver = {
.of_match_table = ba431_trng_dt_ids, .of_match_table = ba431_trng_dt_ids,
}, },
.probe = ba431_trng_probe, .probe = ba431_trng_probe,
.remove = ba431_trng_remove,
}; };
module_platform_driver(ba431_trng_driver); module_platform_driver(ba431_trng_driver);
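The pattern of this conversion in miniature: devm_platform_ioremap_resource() folds the get-resource/ioremap pair, and devm_hwrng_register() retires the remove() callback entirely. Names below are illustrative, not from the driver above:

	#include <linux/hw_random.h>
	#include <linux/platform_device.h>

	struct demo_priv {
		void __iomem *base;
		struct hwrng rng;
	};

	static int demo_probe(struct platform_device *pdev)
	{
		struct demo_priv *priv;

		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(priv->base))
			return PTR_ERR(priv->base);

		/* unwound automatically on unbind; no .remove needed */
		return devm_hwrng_register(&pdev->dev, &priv->rng);
	}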


@ -13,6 +13,7 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/printk.h> #include <linux/printk.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/reset.h>
#define RNG_CTRL 0x0 #define RNG_CTRL 0x0
#define RNG_STATUS 0x4 #define RNG_STATUS 0x4
@ -32,6 +33,7 @@ struct bcm2835_rng_priv {
void __iomem *base; void __iomem *base;
bool mask_interrupts; bool mask_interrupts;
struct clk *clk; struct clk *clk;
struct reset_control *reset;
}; };
static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng) static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng)
@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng)
int ret = 0; int ret = 0;
u32 val; u32 val;
if (!IS_ERR(priv->clk)) { ret = clk_prepare_enable(priv->clk);
ret = clk_prepare_enable(priv->clk); if (ret)
if (ret) return ret;
return ret;
} ret = reset_control_reset(priv->reset);
if (ret)
return ret;
if (priv->mask_interrupts) { if (priv->mask_interrupts) {
/* mask the interrupt */ /* mask the interrupt */
@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng)
/* disable rng hardware */ /* disable rng hardware */
rng_writel(priv, 0, RNG_CTRL); rng_writel(priv, 0, RNG_CTRL);
if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk);
clk_disable_unprepare(priv->clk);
} }
struct bcm2835_rng_of_data { struct bcm2835_rng_of_data {
@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return PTR_ERR(priv->base); return PTR_ERR(priv->base);
/* Clock is optional on most platforms */ /* Clock is optional on most platforms */
priv->clk = devm_clk_get(dev, NULL); priv->clk = devm_clk_get_optional(dev, NULL);
if (PTR_ERR(priv->clk) == -EPROBE_DEFER) if (IS_ERR(priv->clk))
return -EPROBE_DEFER; return PTR_ERR(priv->clk);
priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(priv->reset))
return PTR_ERR(priv->reset);
priv->rng.name = pdev->name; priv->rng.name = pdev->name;
priv->rng.init = bcm2835_rng_init; priv->rng.init = bcm2835_rng_init;
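Both resources here are optional by design: devm_clk_get_optional() and devm_reset_control_get_optional_exclusive() return NULL rather than an error when the DT property is absent, and a NULL handle makes clk_prepare_enable()/reset_control_reset() no-ops. The idiom, reduced to a hypothetical helper:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	static int demo_get_optional(struct device *dev, struct clk **clk,
				     struct reset_control **rst)
	{
		*clk = devm_clk_get_optional(dev, NULL);
		if (IS_ERR(*clk))
			return PTR_ERR(*clk);

		/* NULL (not an error) when no "resets" property exists */
		*rst = devm_reset_control_get_optional_exclusive(dev, NULL);
		return PTR_ERR_OR_ZERO(*rst);
	}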


@@ -486,7 +486,6 @@ static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
 
 static int cctrng_probe(struct platform_device *pdev)
 {
-	struct resource *req_mem_cc_regs = NULL;
 	struct cctrng_drvdata *drvdata;
 	struct device *dev = &pdev->dev;
 	int rc = 0;
@@ -510,27 +509,16 @@ static int cctrng_probe(struct platform_device *pdev)
 
 	drvdata->circ.buf = (char *)drvdata->data_buf;
 
-	/* Get device resources */
-	/* First CC registers space */
-	req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	/* Map registers space */
-	drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
+	drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(drvdata->cc_base)) {
 		dev_err(dev, "Failed to ioremap registers");
 		return PTR_ERR(drvdata->cc_base);
 	}
 
-	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
-		req_mem_cc_regs);
-	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
-		&req_mem_cc_regs->start, drvdata->cc_base);
-
 	/* Then IRQ */
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "Failed getting IRQ resource\n");
+	if (irq < 0)
 		return irq;
-	}
 
 	/* parse sampling rate from device tree */
 	rc = cc_trng_parse_sampling_ratio(drvdata);
@@ -585,7 +573,7 @@ static int cctrng_probe(struct platform_device *pdev)
 	atomic_set(&drvdata->pending_hw, 1);
 
 	/* registration of the hwrng device */
-	rc = hwrng_register(&drvdata->rng);
+	rc = devm_hwrng_register(dev, &drvdata->rng);
 	if (rc) {
 		dev_err(dev, "Could not register hwrng device.\n");
 		goto post_pm_err;
@@ -618,8 +606,6 @@ static int cctrng_remove(struct platform_device *pdev)
 
 	dev_dbg(dev, "Releasing cctrng resources...\n");
 
-	hwrng_unregister(&drvdata->rng);
-
 	cc_trng_pm_fini(drvdata);
 	cc_trng_clk_fini(drvdata);
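This conversion combines two devm idioms that recur throughout the series; a compact sketch under hypothetical my_trng names:

#include <linux/hw_random.h>
#include <linux/platform_device.h>

struct my_trng {
	void __iomem *base;
	struct hwrng rng;
};

static int my_trng_probe(struct platform_device *pdev)
{
	struct my_trng *t = devm_kzalloc(&pdev->dev, sizeof(*t), GFP_KERNEL);

	if (!t)
		return -ENOMEM;

	/* One call replaces platform_get_resource() + devm_ioremap_resource(). */
	t->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(t->base))
		return PTR_ERR(t->base);

	t->rng.name = pdev->name;
	/* Managed registration: the core unregisters on driver detach, so
	 * remove() no longer needs an explicit hwrng_unregister(). */
	return devm_hwrng_register(&pdev->dev, &t->rng);
}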
View File
@@ -396,7 +396,7 @@ static ssize_t hwrng_attr_selected_show(struct device *dev,
 					 struct device_attribute *attr,
 					 char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
 }
 
 static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
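sysfs_emit() knows the sysfs buffer is exactly PAGE_SIZE and enforces that itself, so show() callbacks stop passing the size by hand; a one-function sketch:

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* No PAGE_SIZE argument: sysfs_emit() bounds the write internally. */
	return sysfs_emit(buf, "%d\n", 42);
}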
View File
@@ -25,13 +25,13 @@
  */
 
 #include <linux/hw_random.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/stop_machine.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <asm/io.h>
 
 #define PFX	KBUILD_MODNAME ": "
View File
@@ -30,8 +30,7 @@
 #include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #define RNG_REG_STATUS_RDY			(1 << 0)
@@ -378,16 +377,13 @@ MODULE_DEVICE_TABLE(of, omap_rng_of_match);
 static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 					  struct platform_device *pdev)
 {
-	const struct of_device_id *match;
 	struct device *dev = &pdev->dev;
 	int irq, err;
 
-	match = of_match_device(of_match_ptr(omap_rng_of_match), dev);
-	if (!match) {
-		dev_err(dev, "no compatible OF match\n");
-		return -EINVAL;
-	}
-	priv->pdata = match->data;
+	priv->pdata = of_device_get_match_data(dev);
+	if (!priv->pdata)
+		return -ENODEV;
 
 	if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") ||
 	    of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) {
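of_device_get_match_data() collapses the match-then-fetch dance into one call; a sketch assuming a driver-private pdata pointer (my_priv is hypothetical):

#include <linux/of_device.h>

static int my_get_details(struct device *dev, const void **pdata)
{
	/* Returns the .data pointer of the matching of_device_id,
	 * or NULL when the device has no OF match. */
	*pdata = of_device_get_match_data(dev);
	if (!*pdata)
		return -ENODEV;
	return 0;
}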
View File
@@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev)
 	priv->rng.name = pdev->name;
 	priv->rng.read = pic32_rng_read;
 
-	ret = hwrng_register(&priv->rng);
+	ret = devm_hwrng_register(&pdev->dev, &priv->rng);
 	if (ret)
 		goto err_register;
@@ -113,7 +113,6 @@
 {
 	struct pic32_rng *rng = platform_get_drvdata(pdev);
 
-	hwrng_unregister(&rng->rng);
 	writel(0, rng->base + RNGCON);
 	clk_disable_unprepare(rng->clk);
 	return 0;
View File
@@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev)
 	int ret;
 	struct xiphera_trng *trng;
 	struct device *dev = &pdev->dev;
-	struct resource *res;
 
 	trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL);
 	if (!trng)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	trng->mem = devm_ioremap_resource(dev, res);
+	trng->mem = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(trng->mem))
 		return PTR_ERR(trng->mem);
View File
@@ -500,7 +500,6 @@ struct entropy_store {
 	unsigned short add_ptr;
 	unsigned short input_rotate;
 	int entropy_count;
-	unsigned int initialized:1;
 	unsigned int last_data_init:1;
 	__u8 last_data[EXTRACT_SIZE];
 };
@@ -660,7 +659,7 @@ static void process_random_ready_list(void)
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
-	int entropy_count, orig, has_initialized = 0;
+	int entropy_count, orig;
 	const int pool_size = r->poolinfo->poolfracbits;
 	int nfrac = nbits << ENTROPY_SHIFT;
@@ -717,23 +716,14 @@ retry:
 	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 		goto retry;
 
-	if (has_initialized) {
-		r->initialized = 1;
-		kill_fasync(&fasync, SIGIO, POLL_IN);
-	}
-
 	trace_credit_entropy_bits(r->name, nbits,
 				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);
 
 	if (r == &input_pool) {
 		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
 
-		if (crng_init < 2) {
-			if (entropy_bits < 128)
-				return;
+		if (crng_init < 2 && entropy_bits >= 128)
 			crng_reseed(&primary_crng, r);
-			entropy_bits = ENTROPY_BITS(r);
-		}
 	}
 }
@@ -819,7 +809,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
 static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
 {
-	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	chacha_init_consts(crng->state);
 	_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
 	crng_init_try_arch(crng);
 	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
@@ -827,7 +817,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
 static void __init crng_initialize_primary(struct crng_state *crng)
 {
-	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	chacha_init_consts(crng->state);
 	_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
 	if (crng_init_try_arch_early(crng) && trust_cpu) {
 		invalidate_batched_entropy();
@@ -1372,8 +1362,7 @@ retry:
 }
 
 /*
- * This function does the actual extraction for extract_entropy and
- * extract_entropy_user.
+ * This function does the actual extraction for extract_entropy.
  *
  * Note: we assume that .poolwords is a multiple of 16 words.
  */
View File
@@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
 config CRYPTO_DEV_SUN8I_CE_HASH
 	bool "Enable support for hash on sun8i-ce"
 	depends on CRYPTO_DEV_SUN8I_CE
-	select MD5
-	select SHA1
-	select SHA256
-	select SHA512
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
+	select CRYPTO_SHA512
 	help
 	  Say y to enable support for hash algorithms.
@@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
 config CRYPTO_DEV_SUN8I_SS_HASH
 	bool "Enable support for hash on sun8i-ss"
 	depends on CRYPTO_DEV_SUN8I_SS
-	select MD5
-	select SHA1
-	select SHA256
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_SHA256
 	help
 	  Say y to enable support for hash algorithms.
View File
@@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
 			    sizeof(struct sun4i_cipher_req_ctx) +
 			    crypto_skcipher_reqsize(op->fallback_tfm));
 
-	err = pm_runtime_get_sync(op->ss->dev);
+	err = pm_runtime_resume_and_get(op->ss->dev);
 	if (err < 0)
 		goto error_pm;
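pm_runtime_resume_and_get() differs from pm_runtime_get_sync() in the one detail every hunk of this kind exploits: on failure it drops the usage count itself, so callers no longer need pm_runtime_put_noidle() in the error path. A sketch:

#include <linux/pm_runtime.h>

static int my_do_work(struct device *dev)
{
	int err = pm_runtime_resume_and_get(dev);

	if (err < 0)
		return err;	/* usage count already released, no put needed */

	/* ... touch the hardware ... */

	pm_runtime_put(dev);
	return 0;
}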
View File
@@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev)
 {
 	struct sun4i_ss_ctx *ss = dev_get_drvdata(dev);
 
-	if (ss->reset)
-		reset_control_assert(ss->reset);
+	reset_control_assert(ss->reset);
 
 	clk_disable_unprepare(ss->ssclk);
 	clk_disable_unprepare(ss->busclk);
@@ -314,12 +313,10 @@
 		goto err_enable;
 	}
 
-	if (ss->reset) {
-		err = reset_control_deassert(ss->reset);
-		if (err) {
-			dev_err(ss->dev, "Cannot deassert reset control\n");
-			goto err_enable;
-		}
+	err = reset_control_deassert(ss->reset);
+	if (err) {
+		dev_err(ss->dev, "Cannot deassert reset control\n");
+		goto err_enable;
 	}
 
 	return err;
@@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev)
 	dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
 
 	ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
-	if (IS_ERR(ss->reset)) {
-		if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
-			return PTR_ERR(ss->reset);
+	if (IS_ERR(ss->reset))
+		return PTR_ERR(ss->reset);
+	if (!ss->reset)
 		dev_info(&pdev->dev, "no reset control found\n");
-		ss->reset = NULL;
-	}
 
 	/*
 	 * Check that clock have the correct rates given in the datasheet
@@ -459,7 +454,7 @@
 	 * this info could be useful
 	 */
 
-	err = pm_runtime_get_sync(ss->dev);
+	err = pm_runtime_resume_and_get(ss->dev);
 	if (err < 0)
 		goto error_pm;
View File
@@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm)
 	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
 	op->ss = algt->ss;
 
-	err = pm_runtime_get_sync(op->ss->dev);
+	err = pm_runtime_resume_and_get(op->ss->dev);
 	if (err < 0)
 		return err;
View File
@@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
 	algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
 	ss = algt->ss;
 
-	err = pm_runtime_get_sync(ss->dev);
+	err = pm_runtime_resume_and_get(ss->dev);
 	if (err < 0)
 		return err;
View File
@@ -240,11 +240,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
 theend_sgs:
 	if (areq->src == areq->dst) {
-		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+			     DMA_BIDIRECTIONAL);
 	} else {
 		if (nr_sgs > 0)
-			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
-		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+			dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+				     DMA_TO_DEVICE);
+		dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
+			     DMA_FROM_DEVICE);
 	}
 
 theend_iv:
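The rule behind all of these sg_nents() conversions: dma_unmap_sg() must be given the nents that was originally passed to dma_map_sg(), not the (possibly smaller) mapped count that dma_map_sg() returned. A sketch:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int my_xfer(struct device *dev, struct scatterlist *sgl)
{
	int nr_sgs = dma_map_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE);

	if (nr_sgs <= 0)
		return -EINVAL;

	/* ... the engine consumes nr_sgs mapped segments ... */

	/* Unmap with the original entry count, recomputed via sg_nents(). */
	dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_TO_DEVICE);
	return 0;
}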
View File
@@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
 	if (err)
 		goto error_alg;
 
-	err = pm_runtime_get_sync(ce->dev);
+	err = pm_runtime_resume_and_get(ce->dev);
 	if (err < 0)
 		goto error_alg;
View File
@@ -405,7 +405,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
 
 	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
-	dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+	dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
+		     DMA_TO_DEVICE);
 	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
View File
@@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
 	dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
 	if (dma_mapping_error(ce->dev, dma_iv)) {
 		dev_err(ce->dev, "Cannot DMA MAP IV\n");
+		err = -EFAULT;
 		goto err_iv;
 	}
View File
@@ -232,10 +232,13 @@ sgd_next:
 theend_sgs:
 	if (areq->src == areq->dst) {
-		dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+			     DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
-		dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+			     DMA_TO_DEVICE);
+		dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst),
+			     DMA_FROM_DEVICE);
 	}
 
 theend_iv:
@@ -351,7 +354,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
 	op->enginectx.op.prepare_request = NULL;
 	op->enginectx.op.unprepare_request = NULL;
 
-	err = pm_runtime_get_sync(op->ss->dev);
+	err = pm_runtime_resume_and_get(op->ss->dev);
 	if (err < 0) {
 		dev_err(op->ss->dev, "pm error %d\n", err);
 		goto error_pm;
View File
@@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev)
 	if (err)
 		goto error_alg;
 
-	err = pm_runtime_get_sync(ss->dev);
+	err = pm_runtime_resume_and_get(ss->dev);
 	if (err < 0)
 		goto error_alg;
View File
@@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
 	bf = (__le32 *)pad;
 
 	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
-	if (!result)
+	if (!result) {
+		kfree(pad);
 		return -ENOMEM;
+	}
 
 	for (i = 0; i < MAX_SG; i++) {
 		rctx->t_dst[i].addr = 0;
@@ -432,14 +434,14 @@
 	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));
 
 	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);
-	dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+	dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
+		     DMA_TO_DEVICE);
 	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
 
-	kfree(pad);
-
 	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
-	kfree(result);
 theend:
+	kfree(pad);
+	kfree(result);
 	crypto_finalize_hash_request(engine, breq, err);
 	return 0;
 }
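The leak fix above funnels both buffers through a single exit label; the generic shape of that idiom, as a sketch rather than the driver's code:

#include <linux/slab.h>

static int my_run(void)
{
	int err = -ENOMEM;
	void *pad, *result = NULL;

	pad = kzalloc(64, GFP_KERNEL);
	if (!pad)
		return -ENOMEM;

	result = kzalloc(64, GFP_KERNEL | GFP_DMA);
	if (!result)
		goto theend;

	err = 0;
	/* ... work that may also "goto theend" on failure ... */

theend:
	/* kfree(NULL) is safe, so one label can free every allocation. */
	kfree(pad);
	kfree(result);
	return err;
}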
View File
@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
 	dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
 	if (dma_mapping_error(ss->dev, dma_iv)) {
 		dev_err(ss->dev, "Cannot DMA MAP IV\n");
-		return -EFAULT;
+		err = -EFAULT;
+		goto err_free;
 	}
 
 	dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
@@ -167,6 +168,7 @@ err_iv:
 		memcpy(ctx->seed, d + dlen, ctx->slen);
 	}
 	memzero_explicit(d, todo);
+err_free:
 	kfree(d);
 
 	return err;
View File
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -115,7 +115,7 @@ int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
 	return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
 }
 
-/**
+/*
  * AES Functions
  */
 static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
@@ -374,7 +374,7 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx,
 	return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
 }
 
-/**
+/*
  * AES-CCM Functions
  */
@@ -489,7 +489,7 @@ int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
 	return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
 }
 
-/**
+/*
  * AES-GCM Functions
 */
@@ -617,7 +617,7 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
 	return crypto4xx_crypt_aes_gcm(req, true);
 }
 
-/**
+/*
  * HASH SHA1 Functions
  */
 static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
@@ -711,7 +711,7 @@ int crypto4xx_hash_digest(struct ahash_request *req)
 				  ctx->sa_len, 0, NULL);
 }
 
-/**
+/*
  * SHA1 Algorithm
  */
 int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
View File
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -44,7 +44,7 @@
 #define PPC4XX_SEC_VERSION_STR		"0.5"
 
-/**
+/*
  * PPC4xx Crypto Engine Initialization Routine
  */
 static void crypto4xx_hw_init(struct crypto4xx_device *dev)
@@ -159,7 +159,7 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
 	ctx->sa_len = 0;
 }
 
-/**
+/*
  * alloc memory for the gather ring
  * no need to alloc buf for the ring
  * gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -268,7 +268,7 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
 	return tail;
 }
 
-/**
+/*
  * alloc memory for the gather ring
  * no need to alloc buf for the ring
  * gdr_tail, gdr_head and gdr_count are initialized by this function
@@ -346,7 +346,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
 	return &dev->gdr[idx];
 }
 
-/**
+/*
  * alloc memory for the scatter ring
  * need to alloc buf for the ring
  * sdr_tail, sdr_head and sdr_count are initialized by this function
@@ -930,7 +930,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	return is_busy ? -EBUSY : -EINPROGRESS;
 }
 
-/**
+/*
  * Algorithm Registration Functions
  */
 static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
@@ -1097,7 +1097,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
 	} while (head != tail);
 }
 
-/**
+/*
  * Top Half of isr.
  */
 static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
@@ -1186,7 +1186,7 @@ static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
 	return 0;
 }
 
-/**
+/*
  * Supported Crypto Algorithms
  */
 static struct crypto4xx_alg_common crypto4xx_alg[] = {
@@ -1369,7 +1369,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
 	} },
 };
 
-/**
+/*
  * Module Initialization Routine
 */
 static int crypto4xx_probe(struct platform_device *ofdev)
View File
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -188,7 +188,7 @@ int crypto4xx_hash_final(struct ahash_request *req);
 int crypto4xx_hash_update(struct ahash_request *req);
 int crypto4xx_hash_init(struct ahash_request *req);
 
-/**
+/*
  * Note: Only use this function to copy items that is word aligned.
  */
 static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
View File
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -104,7 +104,7 @@
 #define CRYPTO4XX_PRNG_LFSR_L		0x00070030
 #define CRYPTO4XX_PRNG_LFSR_H		0x00070034
 
-/**
+/*
  * Initialize CRYPTO ENGINE registers, and memory bases.
  */
 #define PPC4XX_PDR_POLL			0x3ff
@@ -123,7 +123,7 @@
 #define PPC4XX_INT_TIMEOUT_CNT		0
 #define PPC4XX_INT_TIMEOUT_CNT_REVB	0x3FF
 #define PPC4XX_INT_CFG			1
-/**
+/*
  * all follow define are ad hoc
  */
 #define PPC4XX_RING_RETRY		100
@@ -131,7 +131,7 @@
 #define PPC4XX_SDR_SIZE			PPC4XX_NUM_SD
 #define PPC4XX_GDR_SIZE			PPC4XX_NUM_GD
 
-/**
+/*
  * Generic Security Association (SA) with all possible fields. These will
  * never likely used except for reference purpose. These structure format
  * can be not changed as the hardware expects them to be layout as defined.
View File
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
@@ -14,7 +14,7 @@
 
 #define AES_IV_SIZE				16
 
-/**
+/*
  * Contents of Dynamic Security Association (SA) with all possible fields
  */
 union dynamic_sa_contents {
@@ -122,7 +122,7 @@ union sa_command_0 {
 #define SA_AES_KEY_LEN_256			4
 
 #define SA_REV2					1
-/**
+/*
  * The follow defines bits sa_command_1
  * In Basic hash mode this bit define simple hash or hmac.
  * In IPsec mode, this bit define muting control.
@@ -172,7 +172,7 @@ struct dynamic_sa_ctl {
 	union sa_command_1 sa_command_1;
 } __attribute__((packed));
 
-/**
+/*
  * State Record for Security Association (SA)
 */
 struct sa_state_record {
@@ -184,7 +184,7 @@ struct sa_state_record {
 	};
 } __attribute__((packed));
 
-/**
+/*
  * Security Association (SA) for AES128
 *
 */
@@ -213,7 +213,7 @@ struct dynamic_sa_aes192 {
 #define SA_AES192_LEN		(sizeof(struct dynamic_sa_aes192)/4)
 #define SA_AES192_CONTENTS	0x3e000062
 
-/**
+/*
  * Security Association (SA) for AES256
 */
 struct dynamic_sa_aes256 {
@@ -228,7 +228,7 @@ struct dynamic_sa_aes256 {
 #define SA_AES256_CONTENTS	0x3e000082
 #define SA_AES_CONTENTS		0x3e000002
 
-/**
+/*
  * Security Association (SA) for AES128 CCM
 */
 struct dynamic_sa_aes128_ccm {
@@ -242,7 +242,7 @@ struct dynamic_sa_aes128_ccm {
 #define SA_AES128_CCM_CONTENTS	0x3e000042
 #define SA_AES_CCM_CONTENTS	0x3e000002
 
-/**
+/*
  * Security Association (SA) for AES128_GCM
 */
 struct dynamic_sa_aes128_gcm {
@@ -258,7 +258,7 @@ struct dynamic_sa_aes128_gcm {
 #define SA_AES128_GCM_CONTENTS	0x3e000442
 #define SA_AES_GCM_CONTENTS	0x3e000402
 
-/**
+/*
  * Security Association (SA) for HASH160: HMAC-SHA1
 */
 struct dynamic_sa_hash160 {
View File
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-or-later */
-/**
+/*
  * AMCC SoC PPC4xx Crypto Driver
  *
  * Copyright (c) 2008 Applied Micro Circuits Corporation.
View File
@@ -236,10 +236,10 @@ static int meson_cipher(struct skcipher_request *areq)
 	dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE);
 
 	if (areq->src == areq->dst) {
-		dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
-		dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+		dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+		dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
 	}
 
 	if (areq->iv && ivsize > 0) {
View File
@@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev)
 	struct meson_dev *mc;
 	int err, i;
 
-	if (!pdev->dev.of_node)
-		return -ENODEV;
-
 	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
 	if (!mc)
 		return -ENOMEM;
View File
@@ -26,7 +26,7 @@
 static struct atmel_ecc_driver_data driver_data;
 
 /**
- * atmel_ecdh_ctx - transformation context
+ * struct atmel_ecdh_ctx - transformation context
  * @client : pointer to i2c client device
  * @fallback : used for unsupported curves or when user wants to use its own
  *             private key.
@@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data;
  *             of the user to not call set_secret() while
  *             generate_public_key() or compute_shared_secret() are in flight.
  * @curve_id : elliptic curve id
- * @n_sz : size in bytes of the n prime
  * @do_fallback: true when the device doesn't support the curve or when the user
  *             wants to use its own private key.
  */
@@ -43,7 +42,6 @@ struct atmel_ecdh_ctx {
 	struct crypto_kpp *fallback;
 	const u8 *public_key;
 	unsigned int curve_id;
-	size_t n_sz;
 	bool do_fallback;
 };
 
@@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq,
 			    int status)
 {
 	struct kpp_request *req = areq;
-	struct atmel_ecdh_ctx *ctx = work_data->ctx;
 	struct atmel_i2c_cmd *cmd = &work_data->cmd;
 	size_t copied, n_sz;
@@ -59,7 +56,7 @@
 		goto free_work_data;
 
 	/* might want less than we've got */
-	n_sz = min_t(size_t, ctx->n_sz, req->dst_len);
+	n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len);
 
 	/* copy the shared secret */
 	copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz),
@@ -73,14 +70,6 @@ free_work_data:
 	kpp_request_complete(req, status);
 }
 
-static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id)
-{
-	if (curve_id == ECC_CURVE_NIST_P256)
-		return ATMEL_ECC_NIST_P256_N_SIZE;
-
-	return 0;
-}
-
 /*
  * A random private key is generated and stored in the device. The device
  * returns the pair public key.
@@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
 		return -EINVAL;
 	}
 
-	ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id);
-	if (!ctx->n_sz || params.key_size) {
+	if (params.key_size) {
 		/* fallback to ecdh software implementation */
 		ctx->do_fallback = true;
 		return crypto_kpp_set_secret(ctx->fallback, buf, len);
@@ -125,7 +113,6 @@
 		goto free_cmd;
 
 	ctx->do_fallback = false;
-	ctx->curve_id = params.curve_id;
 
 	atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2);
@@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm)
 	struct crypto_kpp *fallback;
 	struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm);
 
+	ctx->curve_id = ECC_CURVE_NIST_P256;
 	ctx->client = atmel_ecc_i2c_client_alloc();
 	if (IS_ERR(ctx->client)) {
 		pr_err("tfm - i2c_client binding failed\n");
@@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm)
 	return ATMEL_ECC_PUBKEY_SIZE;
 }
 
-static struct kpp_alg atmel_ecdh = {
+static struct kpp_alg atmel_ecdh_nist_p256 = {
 	.set_secret = atmel_ecdh_set_secret,
 	.generate_public_key = atmel_ecdh_generate_public_key,
 	.compute_shared_secret = atmel_ecdh_compute_shared_secret,
@@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = {
 	.max_size = atmel_ecdh_max_size,
 	.base = {
 		.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
-		.cra_name = "ecdh",
+		.cra_name = "ecdh-nist-p256",
 		.cra_driver_name = "atmel-ecdh",
 		.cra_priority = ATMEL_ECC_PRIORITY,
 		.cra_module = THIS_MODULE,
@@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client,
 		       &driver_data.i2c_client_list);
 	spin_unlock(&driver_data.i2c_list_lock);
 
-	ret = crypto_register_kpp(&atmel_ecdh);
+	ret = crypto_register_kpp(&atmel_ecdh_nist_p256);
 	if (ret) {
 		spin_lock(&driver_data.i2c_list_lock);
 		list_del(&i2c_priv->i2c_client_list_node);
 		spin_unlock(&driver_data.i2c_list_lock);
 
 		dev_err(&client->dev, "%s alg registration failed\n",
-			atmel_ecdh.base.cra_driver_name);
+			atmel_ecdh_nist_p256.base.cra_driver_name);
 	} else {
 		dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n");
 	}
@@ -365,7 +353,7 @@ static int atmel_ecc_remove(struct i2c_client *client)
 		return -EBUSY;
 	}
 
-	crypto_unregister_kpp(&atmel_ecdh);
+	crypto_unregister_kpp(&atmel_ecdh_nist_p256);
 
 	spin_lock(&driver_data.i2c_list_lock);
 	list_del(&i2c_priv->i2c_client_list_node);
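With the curve ID now explicit in the algorithm name, users of this transform select it by the curve-qualified string; a usage sketch:

#include <crypto/kpp.h>

static struct crypto_kpp *get_p256_tfm(void)
{
	/* Matches .cra_name = "ecdh-nist-p256"; the bare "ecdh" name is gone. */
	return crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
}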
View File
@@ -339,7 +339,7 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
 	}
 
 	if (bus_clk_rate > 1000000L) {
-		dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n",
+		dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n",
 			bus_clk_rate);
 		return -EINVAL;
 	}
View File
@@ -434,7 +434,7 @@ static int atmel_sha_init(struct ahash_request *req)
 
 	ctx->flags = 0;
 
-	dev_dbg(dd->dev, "init: digest size: %d\n",
+	dev_dbg(dd->dev, "init: digest size: %u\n",
 		crypto_ahash_digestsize(tfm));
 
 	switch (crypto_ahash_digestsize(tfm)) {
@@ -1102,7 +1102,7 @@ static int atmel_sha_start(struct atmel_sha_dev *dd)
 	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
 	int err;
 
-	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n",
 		ctx->op, req->nbytes);
 
 	err = atmel_sha_hw_init(dd);
View File
@@ -1217,7 +1217,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
 	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
 	if (IS_ERR(tdes_dd->io_base)) {
-		dev_err(dev, "can't ioremap\n");
 		err = PTR_ERR(tdes_dd->io_base);
 		goto err_tasklet_kill;
 	}
View File
@@ -1019,6 +1019,7 @@ static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
  * a SPU response message for an AEAD request. Includes buffers to catch SPU
  * message headers and the response data.
  * @mssg:	mailbox message containing the receive sg
+ * @req:	Crypto API request
  * @rctx:	crypto request context
  * @rx_frag_num: number of scatterlist elements required to hold the
  *		SPU response message
@@ -2952,9 +2953,9 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
 
 /**
  * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
- * cipher: AEAD structure
- * key: Key followed by 4 bytes of salt
- * keylen: Length of key plus salt, in bytes
+ * @cipher: AEAD structure
+ * @key: Key followed by 4 bytes of salt
+ * @keylen: Length of key plus salt, in bytes
  *
  * Extracts salt from key and stores it to be prepended to IV on each request.
  * Digest is always 16 bytes
View File
@@ -457,7 +457,7 @@ u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
  * @cipher_mode:	Algo type
  * @data_size:		Length of plaintext (bytes)
  *
- * @Return: Length of padding, in bytes
+ * Return: Length of padding, in bytes
  */
 u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
 			 unsigned int data_size)
@@ -510,10 +510,10 @@ u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
 }
 
 /**
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+ * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
  * in a SPU request after the AAD and before the payload.
  * @cipher_mode:	cipher mode
- * @iv_ctr_len:		initialization vector length in bytes
+ * @iv_len:		initialization vector length in bytes
  *
  * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
  * to include the IV as a separate field in the SPU request msg.
@@ -543,9 +543,9 @@ enum hash_type spum_hash_type(u32 src_sent)
 /**
  * spum_digest_size() - Determine the size of a hash digest to expect the SPU to
  * return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg: The hash algorithm
- * htype: Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg: The hash algorithm
+ * @htype: Type of hash operation (init, update, full, etc)
  *
  * When doing incremental hashing for an algorithm with a truncated hash
  * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
@@ -580,7 +580,7 @@ u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
  * @aead_parms:   Parameters related to AEAD operation
  * @data_size:    Length of data to be encrypted or authenticated. If AEAD, does
  *		  not include length of AAD.
+ *
  * Return: the length of the SPU header in bytes. 0 if an error occurs.
  */
 u32 spum_create_request(u8 *spu_hdr,
@@ -911,7 +911,7 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
  * setkey() time in spu_cipher_req_init().
  * @spu_hdr:         Start of the request message header (MH field)
  * @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound:       0 encrypt, 1 decrypt
+ * @is_inbound:      0 encrypt, 1 decrypt
  * @cipher_parms:    Parameters describing cipher operation to be performed
  * @data_size:       Length of the data in the BD field
  *
View File
@@ -543,7 +543,8 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len)
 /**
  * spu2_fmd_init() - At setkey time, initialize the fixed meta data for
  * subsequent skcipher requests for this context.
- * @spu2_cipher_type:  Cipher algorithm
+ * @fmd:               Start of FMD field to be written
+ * @spu2_type:         Cipher algorithm
  * @spu2_mode:         Cipher mode
  * @cipher_key_len:    Length of cipher key, in bytes
  * @cipher_iv_len:     Length of cipher initialization vector, in bytes
@@ -598,7 +599,7 @@ static int spu2_fmd_init(struct SPU2_FMD *fmd,
  *		  SPU request packet.
  * @fmd:          Start of FMD field to be written
  * @is_inbound:   true if decrypting. false if encrypting.
- * @authFirst:    true if alg authenticates before encrypting
+ * @auth_first:   true if alg authenticates before encrypting
  * @protocol:     protocol selector
  * @cipher_type:  cipher algorithm
  * @cipher_mode:  cipher mode
@@ -640,6 +641,7 @@ static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd,
  * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of
  *		  SPU request packet.
  * @fmd:            Start of FMD field to be written
+ * @is_inbound:     true if decrypting. false if encrypting.
  * @assoc_size:     Length of additional associated data, in bytes
  * @auth_key_len:   Length of authentication key, in bytes
  * @cipher_key_len: Length of cipher key, in bytes
@@ -793,7 +795,7 @@ u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
 }
 
 /**
- * spu_payload_length() - Given a SPU2 message header, extract the payload
+ * spu2_payload_length() - Given a SPU2 message header, extract the payload
  * length.
  * @spu_hdr:  Start of SPU message header (FMD)
  *
@@ -812,10 +814,11 @@ u32 spu2_payload_length(u8 *spu_hdr)
 }
 
 /**
- * spu_response_hdr_len() - Determine the expected length of a SPU response
+ * spu2_response_hdr_len() - Determine the expected length of a SPU response
  * header.
  * @auth_key_len:  Length of authentication key, in bytes
  * @enc_key_len:   Length of encryption key, in bytes
+ * @is_hash:       Unused
  *
  * For SPU2, includes just FMD. OMD is never requested.
  *
@@ -827,7 +830,7 @@ u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
 }
 
 /**
- * spu_hash_pad_len() - Calculate the length of hash padding required to extend
+ * spu2_hash_pad_len() - Calculate the length of hash padding required to extend
  * data to a full block size.
  * @hash_alg:  hash algorithm
  * @hash_mode: hash mode
@@ -845,8 +848,10 @@ u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
 }
 
 /**
- * spu2_gcm_ccm_padlen() - Determine the length of GCM/CCM padding for either
+ * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either
  * the AAD field or the data.
+ * @cipher_mode:  Unused
+ * @data_size:    Unused
  *
 * Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required.
 */
@@ -857,7 +862,7 @@ u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
 }
 
 /**
- * spu_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
+ * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch
  * associated data in a SPU2 output packet.
  * @cipher_mode:  cipher mode
  * @assoc_len:    length of additional associated data, in bytes
@@ -878,11 +883,11 @@ u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode,
 	return resp_len;
 }
 
-/*
- * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included
+/**
+ * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included
  * in a SPU request after the AAD and before the payload.
  * @cipher_mode:  cipher mode
- * @iv_ctr_len:   initialization vector length in bytes
+ * @iv_len:       initialization vector length in bytes
  *
  * For SPU2, AEAD IV is included in OMD and does not need to be repeated
  * prior to the payload.
@@ -909,9 +914,9 @@ enum hash_type spu2_hash_type(u32 src_sent)
 /**
  * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to
  * return.
- * alg_digest_size: Number of bytes in the final digest for the given algo
- * alg: The hash algorithm
- * htype: Type of hash operation (init, update, full, etc)
+ * @alg_digest_size: Number of bytes in the final digest for the given algo
+ * @alg: The hash algorithm
+ * @htype: Type of hash operation (init, update, full, etc)
  *
  */
 u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
@@ -921,7 +926,7 @@ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg,
 }
 
 /**
- * spu_create_request() - Build a SPU2 request message header, includint FMD and
+ * spu2_create_request() - Build a SPU2 request message header, includint FMD and
  * OMD.
  * @spu_hdr: Start of buffer where SPU request header is to be written
  * @req_opts: SPU request message options
@@ -1105,7 +1110,7 @@ u32 spu2_create_request(u8 *spu_hdr,
 }
 
 /**
- * spu_cipher_req_init() - Build an skcipher SPU2 request message header,
+ * spu2_cipher_req_init() - Build an skcipher SPU2 request message header,
  * including FMD and OMD.
  * @spu_hdr:       Location of start of SPU request (FMD field)
  * @cipher_parms:  Parameters describing cipher request
@@ -1162,11 +1167,11 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
 }
 
 /**
- * spu_cipher_req_finish() - Finish building a SPU request message header for a
+ * spu2_cipher_req_finish() - Finish building a SPU request message header for a
  * block cipher request.
  * @spu_hdr:         Start of the request message header (MH field)
  * @spu_req_hdr_len: Length in bytes of the SPU request header
- * @isInbound:       0 encrypt, 1 decrypt
+ * @is_inbound:      0 encrypt, 1 decrypt
  * @cipher_parms:    Parameters describing cipher operation to be performed
 * @data_size:       Length of the data in the BD field
 *
@@ -1222,7 +1227,7 @@ void spu2_cipher_req_finish(u8 *spu_hdr,
 }
 
 /**
- * spu_request_pad() - Create pad bytes at the end of the data.
+ * spu2_request_pad() - Create pad bytes at the end of the data.
  * @pad_start:     Start of buffer where pad bytes are to be written
  * @gcm_padding:   Length of GCM padding, in bytes
  * @hash_pad_len:  Number of bytes of padding extend data to full block
@@ -1311,7 +1316,7 @@ u8 spu2_rx_status_len(void)
 }
 
 /**
- * spu_status_process() - Process the status from a SPU response message.
+ * spu2_status_process() - Process the status from a SPU response message.
  * @statp:  start of STATUS word
  *
  * Return: 0 - if status is good and response should be processed
View File
@@ -119,8 +119,8 @@ int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
  * @from_skip:   number of bytes to skip in from_sg. Non-zero when previous
  *		  request included part of the buffer in entry in from_sg.
  *		  Assumes from_skip < from_sg->length.
- * @from_nents   number of entries in from_sg
- * @length       number of bytes to copy. may reach this limit before exhausting
+ * @from_nents:  number of entries in from_sg
+ * @length:      number of bytes to copy. may reach this limit before exhausting
  *		  from_sg.
  *
  * Copies the entries themselves, not the data in the entries. Assumes to_sg has
View File
@@ -71,6 +71,9 @@ struct caam_skcipher_alg {
  * @adata: authentication algorithm details
  * @cdata: encryption algorithm details
  * @authsize: authentication tag (a.k.a. ICV / MAC) size
+ * @xts_key_fallback: true if fallback tfm needs to be used due
+ *		      to unsupported xts key lengths
+ * @fallback: xts fallback tfm
  */
 struct caam_ctx {
 	struct caam_flc flc[NUM_OP];
View File
@@ -187,7 +187,8 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
 }
 
 /**
- * Count leading zeros, need it to strip, from a given scatterlist
+ * caam_rsa_count_leading_zeros - Count leading zeros, need it to strip,
+ *                                from a given scatterlist
  *
  * @sgl   : scatterlist to count zeros from
  * @nbytes: number of zeros, in bytes, to strip
View File
@@ -10,7 +10,6 @@
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/printk.h>
-#include <linux/version.h>
 
 #include "cptpf.h"
View File
@@ -10,7 +10,7 @@
 #include "nitrox_isr.h"
 #include "nitrox_mbx.h"
 
-/**
+/*
  * One vector for each type of ring
  *  - NPS packet ring, AQMQ ring and ZQMQ ring
  */
@@ -216,7 +216,7 @@ static void nps_core_int_tasklet(unsigned long data)
 	}
 }
 
-/**
+/*
  * nps_core_int_isr - interrupt handler for NITROX errors and
  * mailbox communication
 */
View File
@@ -58,14 +58,15 @@ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
 	struct device *dev = DEV(ndev);
 
-	dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
+		     DMA_BIDIRECTIONAL);
 	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
 			 DMA_TO_DEVICE);
 	kfree(sr->in.sgcomp);
 	sr->in.sg = NULL;
 	sr->in.sgmap_cnt = 0;
 
-	dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+	dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
 		     DMA_BIDIRECTIONAL);
 	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
 			 DMA_TO_DEVICE);
@@ -178,7 +179,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
 	return 0;
 
 incomp_err:
-	dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
 	sr->in.sgmap_cnt = 0;
 	return ret;
 }
@@ -203,7 +204,7 @@ static int dma_map_outbufs(struct nitrox_softreq *sr,
 	return 0;
 
 outcomp_map_err:
-	dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL);
 	sr->out.sgmap_cnt = 0;
 	sr->out.sg = NULL;
 	return ret;
View File
@@ -56,7 +56,6 @@
 #include <linux/seq_file.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/version.h>
 
 /* Device specific zlib function definitions */
 #include "zip_device.h"
View File
@@ -59,7 +59,7 @@ struct ccp_crypto_queue {
 #define CCP_CRYPTO_MAX_QLEN	100
 
 static struct ccp_crypto_queue req_queue;
-static spinlock_t req_queue_lock;
+static DEFINE_SPINLOCK(req_queue_lock);
 
 struct ccp_crypto_cmd {
 	struct list_head entry;
@@ -410,7 +410,6 @@ static int ccp_crypto_init(void)
 		return ret;
 	}
 
-	spin_lock_init(&req_queue_lock);
 	INIT_LIST_HEAD(&req_queue.cmds);
 	req_queue.backlog = &req_queue.cmds;
 	req_queue.cmd_count = 0;
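DEFINE_SPINLOCK() initializes the lock statically at build time, which is why the runtime spin_lock_init() call disappears from the init function; a sketch of the idiom:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* usable before any init code runs */

static void example_critical(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}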
View File
@@ -548,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 	return ccp->cmd_q_count == suspended;
 }
 
-int ccp_dev_suspend(struct sp_device *sp)
+void ccp_dev_suspend(struct sp_device *sp)
 {
 	struct ccp_device *ccp = sp->ccp_data;
 	unsigned long flags;
@@ -556,7 +556,7 @@ int ccp_dev_suspend(struct sp_device *sp)
 
 	/* If there's no device there's nothing to do */
 	if (!ccp)
-		return 0;
+		return;
 
 	spin_lock_irqsave(&ccp->cmd_lock, flags);
@@ -572,11 +572,9 @@ int ccp_dev_suspend(struct sp_device *sp)
 	while (!ccp_queues_suspended(ccp))
 		wait_event_interruptible(ccp->suspend_queue,
 					 ccp_queues_suspended(ccp));
-
-	return 0;
 }
 
-int ccp_dev_resume(struct sp_device *sp)
+void ccp_dev_resume(struct sp_device *sp)
 {
 	struct ccp_device *ccp = sp->ccp_data;
 	unsigned long flags;
@@ -584,7 +582,7 @@ int ccp_dev_resume(struct sp_device *sp)
 
 	/* If there's no device there's nothing to do */
 	if (!ccp)
-		return 0;
+		return;
 
 	spin_lock_irqsave(&ccp->cmd_lock, flags);
@@ -597,8 +595,6 @@ int ccp_dev_resume(struct sp_device *sp)
 	}
 
 	spin_unlock_irqrestore(&ccp->cmd_lock, flags);
-
-	return 0;
 }
 
 int ccp_dev_init(struct sp_device *sp)
View File
@@ -2418,7 +2418,6 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	dst.address += CCP_ECC_OUTPUT_SIZE;
 	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
 				CCP_ECC_MODULUS_BYTES);
-	dst.address += CCP_ECC_OUTPUT_SIZE;
 
 	/* Restore the workarea address */
 	dst.address = save;

View File

@@ -21,6 +21,7 @@
 #include <linux/ccp.h>
 #include <linux/firmware.h>
 #include <linux/gfp.h>
+#include <linux/cpufeature.h>
 
 #include <asm/smp.h>
@@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp)
 	struct sev_device *sev;
 	int ret = -ENOMEM;
 
+	if (!boot_cpu_has(X86_FEATURE_SEV)) {
+		dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
+		return 0;
+	}
+
 	sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
 	if (!sev)
 		goto e_err;
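The guard added above is the standard early-exit pattern for optional CPU features: probing succeeds, the sub-device is simply not instantiated, and dev_info_once() keeps the message from repeating across devices. A minimal sketch of the same shape (names mirror the hunk; the surrounding init plumbing is elided):

#include <linux/cpufeature.h>
#include <linux/device.h>

static int optional_feature_init(struct device *dev)
{
	if (!boot_cpu_has(X86_FEATURE_SEV)) {
		dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n");
		return 0;	/* absent feature is not a probe error */
	}

	/* ...allocate and register the feature's sub-device here... */
	return 0;
}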

View File

@@ -213,12 +213,8 @@ void sp_destroy(struct sp_device *sp)
 
 int sp_suspend(struct sp_device *sp)
 {
-	int ret;
-
 	if (sp->dev_vdata->ccp_vdata) {
-		ret = ccp_dev_suspend(sp);
-		if (ret)
-			return ret;
+		ccp_dev_suspend(sp);
 	}
 
 	return 0;
@@ -226,12 +222,8 @@ int sp_suspend(struct sp_device *sp)
 
 int sp_resume(struct sp_device *sp)
 {
-	int ret;
-
 	if (sp->dev_vdata->ccp_vdata) {
-		ret = ccp_dev_resume(sp);
-		if (ret)
-			return ret;
+		ccp_dev_resume(sp);
 	}
 
 	return 0;

View File

@@ -134,8 +134,8 @@ struct sp_device *sp_get_psp_master_device(void);
 int ccp_dev_init(struct sp_device *sp);
 void ccp_dev_destroy(struct sp_device *sp);
 
-int ccp_dev_suspend(struct sp_device *sp);
-int ccp_dev_resume(struct sp_device *sp);
+void ccp_dev_suspend(struct sp_device *sp);
+void ccp_dev_resume(struct sp_device *sp);
 
 #else	/* !CONFIG_CRYPTO_DEV_SP_CCP */
 
@@ -144,15 +144,8 @@ static inline int ccp_dev_init(struct sp_device *sp)
 	return 0;
 }
 static inline void ccp_dev_destroy(struct sp_device *sp) { }
-
-static inline int ccp_dev_suspend(struct sp_device *sp)
-{
-	return 0;
-}
-static inline int ccp_dev_resume(struct sp_device *sp)
-{
-	return 0;
-}
+static inline void ccp_dev_suspend(struct sp_device *sp) { }
+static inline void ccp_dev_resume(struct sp_device *sp) { }
 #endif /* CONFIG_CRYPTO_DEV_SP_CCP */
 
 #ifdef CONFIG_CRYPTO_DEV_SP_PSP

View File

@@ -356,6 +356,7 @@ static const struct pci_device_id sp_pci_table[] = {
 	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
 	{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
 	{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
+	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
 	/* Last entry must be zero */
 	{ 0, }
 };

View File

@@ -5,7 +5,7 @@
 * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
 * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
 *
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
 */
 
 #include <linux/types.h>
@@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
 	if (!start_addr)
 		return -ENOMEM;
 
+	memset(start_addr, 0x0, ring_size);
 	rb_mgr->ring_start = start_addr;
 	rb_mgr->ring_size = ring_size;
 	rb_mgr->ring_pa = __psp_pa(start_addr);
@@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
 			  void *buf, size_t len, struct tee_ring_cmd **resp)
 {
 	struct tee_ring_cmd *cmd;
-	u32 rptr, wptr;
 	int nloop = 1000, ret = 0;
+	u32 rptr;
 
 	*resp = NULL;
 
 	mutex_lock(&tee->rb_mgr.mutex);
 
-	wptr = tee->rb_mgr.wptr;
-
-	/* Check if ring buffer is full */
+	/* Loop until empty entry found in ring buffer */
 	do {
+		/* Get pointer to ring buffer command entry */
+		cmd = (struct tee_ring_cmd *)
+			(tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
+
 		rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
 
-		if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
+		/* Check if ring buffer is full or command entry is waiting
+		 * for response from TEE
+		 */
+		if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+		      cmd->flag == CMD_WAITING_FOR_RESPONSE))
			break;
 
-		dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
-			 rptr, wptr);
+		dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
+			rptr, tee->rb_mgr.wptr);
 
-		/* Wait if ring buffer is full */
+		/* Wait if ring buffer is full or TEE is processing data */
 		mutex_unlock(&tee->rb_mgr.mutex);
 		schedule_timeout_interruptible(msecs_to_jiffies(10));
 		mutex_lock(&tee->rb_mgr.mutex);
 
 	} while (--nloop);
 
-	if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
-		dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
-			rptr, wptr);
+	if (!nloop &&
+	    (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
+	     cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
+		dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
+			rptr, tee->rb_mgr.wptr, cmd->flag);
 		ret = -EBUSY;
 		goto unlock;
 	}
 
-	/* Pointer to empty data entry in ring buffer */
-	cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
+	/* Do not submit command if PSP got disabled while processing any
+	 * command in another thread
+	 */
+	if (psp_dead) {
+		ret = -EBUSY;
+		goto unlock;
+	}
 
 	/* Write command data into ring buffer */
 	cmd->cmd_id = cmd_id;
@@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
 	memset(&cmd->buf[0], 0, sizeof(cmd->buf));
 	memcpy(&cmd->buf[0], buf, len);
 
+	/* Indicate driver is waiting for response */
+	cmd->flag = CMD_WAITING_FOR_RESPONSE;
+
 	/* Update local copy of write pointer */
 	tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
 	if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
@@ -309,14 +326,14 @@ static int tee_wait_cmd_completion(struct psp_tee_device *tee,
 				   struct tee_ring_cmd *resp,
 				   unsigned int timeout)
 {
-	/* ~5ms sleep per loop => nloop = timeout * 200 */
-	int nloop = timeout * 200;
+	/* ~1ms sleep per loop => nloop = timeout * 1000 */
+	int nloop = timeout * 1000;
 
 	while (--nloop) {
 		if (resp->cmd_state == TEE_CMD_STATE_COMPLETED)
			return 0;
 
-		usleep_range(5000, 5100);
+		usleep_range(1000, 1100);
 	}
 
 	dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n",
@@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
		return ret;
 
 	ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
-	if (ret)
+	if (ret) {
+		resp->flag = CMD_RESPONSE_TIMEDOUT;
		return ret;
+	}
 
 	memcpy(buf, &resp->buf[0], len);
 	*status = resp->status;
+	resp->flag = CMD_RESPONSE_COPIED;
 
 	return 0;
 }
 EXPORT_SYMBOL(psp_tee_process_cmd);
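On the retuned wait loop above: the iteration budget and the per-iteration sleep move together, so the overall budget of TEE_DEFAULT_TIMEOUT seconds is unchanged while completion is noticed up to five times sooner. The pattern as a standalone sketch, assuming a caller-supplied completion word:

#include <linux/delay.h>
#include <linux/errno.h>

static int poll_completion(const volatile u32 *state, unsigned int timeout_s)
{
	int nloop = timeout_s * 1000;	/* ~1 ms sleep per loop */

	while (--nloop) {
		if (*state == 1)	/* assumed 'completed' value */
			return 0;
		usleep_range(1000, 1100);
	}
	return -ETIMEDOUT;
}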

View File

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: MIT */
 /*
- * Copyright 2019 Advanced Micro Devices, Inc.
+ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
 *
 * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
 * Author: Devaraj Rangasamy <Devaraj.Rangasamy@amd.com>
@@ -18,7 +18,7 @@
 #include <linux/mutex.h>
 
 #define TEE_DEFAULT_TIMEOUT		10
-#define MAX_BUFFER_SIZE			992
+#define MAX_BUFFER_SIZE			988
 
 /**
 * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
@@ -81,6 +81,20 @@ enum tee_cmd_state {
 	TEE_CMD_STATE_COMPLETED,
 };
 
+/**
+ * enum cmd_resp_state - TEE command's response status maintained by driver
+ * @CMD_RESPONSE_INVALID:      initial state when no command is written to ring
+ * @CMD_WAITING_FOR_RESPONSE:  driver waiting for response from TEE
+ * @CMD_RESPONSE_TIMEDOUT:     failed to get response from TEE
+ * @CMD_RESPONSE_COPIED:       driver has copied response from TEE
+ */
+enum cmd_resp_state {
+	CMD_RESPONSE_INVALID,
+	CMD_WAITING_FOR_RESPONSE,
+	CMD_RESPONSE_TIMEDOUT,
+	CMD_RESPONSE_COPIED,
+};
+
 /**
 * struct tee_ring_cmd - Structure of the command buffer in TEE ring
 * @cmd_id:      refers to &enum tee_cmd_id. Command id for the ring buffer
@@ -91,6 +105,7 @@ enum tee_cmd_state {
 * @pdata:       private data (currently unused)
 * @res1:        reserved region
 * @buf:         TEE command specific buffer
+ * @flag:	 refers to &enum cmd_resp_state
 */
 struct tee_ring_cmd {
 	u32 cmd_id;
@@ -100,6 +115,7 @@ struct tee_ring_cmd {
 	u64 pdata;
 	u32 res1[2];
 	u8 buf[MAX_BUFFER_SIZE];
+	u32 flag;
 
 	/* Total size: 1024 bytes */
 } __packed;
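The buffer shrink pairs with the new field: a ring entry must stay exactly 1024 bytes, the fields ahead of buf account for 32 of them, and the trailing u32 flag takes 4 more, so MAX_BUFFER_SIZE drops from 992 to 988 (32 + 988 + 4 = 1024). A build-time guard along these lines would catch future drift (a sketch, not part of this patch):

#include <linux/build_bug.h>

static_assert(sizeof(struct tee_ring_cmd) == 1024,
	      "TEE ring entry must stay exactly 1024 bytes");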

View File

@@ -352,10 +352,8 @@ static int init_cc_resources(struct platform_device *plat_dev)
 	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
 	/* Map registers space */
 	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
-	if (IS_ERR(new_drvdata->cc_base)) {
-		dev_err(dev, "Failed to ioremap registers");
+	if (IS_ERR(new_drvdata->cc_base))
		return PTR_ERR(new_drvdata->cc_base);
-	}
 
 	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);

View File

@@ -126,11 +126,6 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 	return container_of(ctx->dev, struct uld_ctx, dev);
 }
 
-static inline int is_ofld_imm(const struct sk_buff *skb)
-{
-	return (skb->len <= SGE_MAX_WR_LEN);
-}
-
 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
 {
 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
@@ -769,13 +764,14 @@ static inline void create_wreq(struct chcr_context *ctx,
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	unsigned int tx_channel_id, rx_channel_id;
 	unsigned int txqidx = 0, rxqidx = 0;
-	unsigned int qid, fid;
+	unsigned int qid, fid, portno;
 
 	get_qidxs(req, &txqidx, &rxqidx);
 	qid = u_ctx->lldi.rxq_ids[rxqidx];
 	fid = u_ctx->lldi.rxq_ids[0];
+	portno = rxqidx / ctx->rxq_perchan;
 	tx_channel_id = txqidx / ctx->txq_perchan;
-	rx_channel_id = rxqidx / ctx->rxq_perchan;
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
 
 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
@@ -797,15 +793,13 @@ static inline void create_wreq(struct chcr_context *ctx,
 
 /**
 *	create_cipher_wr - form the WR for cipher operations
- *	@req: cipher req.
- *	@ctx: crypto driver context of the request.
- *	@qid: ingress qid where response of this WR should be received.
- *	@op_type:	encryption or decryption
+ *	@wrparam: Container for create_cipher_wr()'s parameters
 */
 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 	struct chcr_context *ctx = c_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
@@ -822,6 +816,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 	struct adapter *adap = padap(ctx->dev);
 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
 	dst_size = get_space_for_phys_dsgl(nents);
@@ -1559,7 +1554,8 @@ static inline void chcr_free_shash(struct crypto_shash *base_hash)
 
 /**
 *	create_hash_wr - Create hash work request
- *	@req - Cipher req base
+ *	@req: Cipher req base
+ *	@param: Container for create_hash_wr()'s parameters
 */
 static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
@@ -1580,6 +1576,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	int error = 0;
 	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
@@ -2438,6 +2435,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_context *ctx = a_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2457,6 +2455,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	struct adapter *adap = padap(ctx->dev);
 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	if (req->cryptlen == 0)
		return NULL;
@@ -2710,9 +2709,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
 	struct dsgl_walk dsgl_walk;
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	struct chcr_context *ctx = a_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	u32 temp;
 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
 	temp = req->assoclen + req->cryptlen +
@@ -2752,9 +2753,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
 	struct chcr_context *ctx = c_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct dsgl_walk dsgl_walk;
 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	dsgl_walk_init(&dsgl_walk, phys_cpl);
 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
@@ -2958,6 +2961,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_context *ctx = a_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
@@ -2967,6 +2971,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
 	unsigned int tag_offset = 0, auth_offset = 0;
 	unsigned int assoclen;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
+
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
@@ -3127,6 +3133,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct chcr_context *ctx = a_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	struct sk_buff *skb = NULL;
@@ -3143,6 +3150,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	struct adapter *adap = padap(ctx->dev);
 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
 
+	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;
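The same two lines recur throughout the file above: the value derived from the queue index is reinterpreted as a port number and translated to that port's hardware channel via cxgb4_port_e2cchan(). A hypothetical helper capturing the repeated pattern (not part of the patch, which open-codes it at each site):

static unsigned int chcr_rx_channel(struct uld_ctx *u_ctx,
				    struct chcr_context *ctx,
				    unsigned int rxqidx)
{
	unsigned int portno = rxqidx / ctx->rxq_perchan;

	/* map the port's net_device to its committed channel */
	return cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
}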

View File

@@ -1,4 +1,4 @@
-/**
+/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
@@ -184,7 +184,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
 	struct uld_ctx *u_ctx;
 
 	/* Create the device and add it in the device list */
-	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
+	pr_info_once("%s\n", DRV_DESC);
 	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);
@@ -309,4 +309,3 @@ module_exit(chcr_crypto_exit);
 MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Chelsio Communications");
-MODULE_VERSION(DRV_VERSION);

View File

@@ -44,7 +44,6 @@
 #include "cxgb4_uld.h"
 
 #define DRV_MODULE_NAME "chcr"
-#define DRV_VERSION "1.0.0.0-ko"
 #define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"
 
 #define MAX_PENDING_REQ_TO_HW 20

View File

@@ -21,7 +21,7 @@
 
 /* Static structures */
 static void __iomem *_iobase;
-static spinlock_t lock;
+static DEFINE_SPINLOCK(lock);
 
 /* Write a 128 bit field (either a writable key or IV) */
 static inline void
@@ -383,8 +383,6 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
		goto erequest;
 	}
 
-	spin_lock_init(&lock);
-
 	/* Clear any pending activity */
 	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

View File

@@ -68,6 +68,8 @@ config CRYPTO_DEV_HISI_HPRE
	select CRYPTO_DEV_HISI_QM
	select CRYPTO_DH
	select CRYPTO_RSA
+	select CRYPTO_CURVE25519
+	select CRYPTO_ECDH
	help
	  Support for HiSilicon HPRE(High Performance RSA Engine)
	  accelerator, which can accelerate RSA and DH algorithms.

View File

@@ -10,6 +10,14 @@
 #define HPRE_PF_DEF_Q_NUM		64
 #define HPRE_PF_DEF_Q_BASE		0
 
+/*
+ * type used in qm sqc DW6.
+ * 0 - Algorithm which has been supported in V2, like RSA, DH and so on;
+ * 1 - ECC algorithm in V3.
+ */
+#define HPRE_V2_ALG_TYPE	0
+#define HPRE_V3_ECC_ALG_TYPE	1
+
 enum {
 	HPRE_CLUSTER0,
 	HPRE_CLUSTER1,
@@ -18,7 +26,6 @@ enum {
 };
 
 enum hpre_ctrl_dbgfs_file {
-	HPRE_CURRENT_QM,
 	HPRE_CLEAR_ENABLE,
 	HPRE_CLUSTER_CTRL,
 	HPRE_DEBUG_FILE_NUM,
@@ -75,6 +82,9 @@ enum hpre_alg_type {
 	HPRE_ALG_KG_CRT = 0x3,
 	HPRE_ALG_DH_G2 = 0x4,
 	HPRE_ALG_DH = 0x5,
+	HPRE_ALG_ECC_MUL = 0xD,
+	/* shared by x25519 and x448, but x448 is not supported now */
+	HPRE_ALG_CURVE25519_MUL = 0x10,
 };
 
 struct hpre_sqe {
@@ -92,8 +102,8 @@ struct hpre_sqe {
 	__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
 };
 
-struct hisi_qp *hpre_create_qp(void);
-int hpre_algs_register(void);
-void hpre_algs_unregister(void);
+struct hisi_qp *hpre_create_qp(u8 type);
+int hpre_algs_register(struct hisi_qm *qm);
+void hpre_algs_unregister(struct hisi_qm *qm);
 
 #endif
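With the new parameter, callers pick the queue type by algorithm family. A sketch of assumed usage (the actual call sites live in the suppressed file diff below):

static struct hisi_qp *hpre_get_qp_for_alg(bool is_ecc)
{
	u8 type = is_ecc ? HPRE_V3_ECC_ALG_TYPE : HPRE_V2_ALG_TYPE;

	/* hpre_create_qp() returns NULL for unknown types or when no
	 * queue pair is available on this node
	 */
	return hpre_create_qp(type);
}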

File diff suppressed because it is too large

View File

@@ -13,7 +13,6 @@
 #include <linux/uacce.h>
 #include "hpre.h"
 
-#define HPRE_QUEUE_NUM_V2		1024
 #define HPRE_QM_ABNML_INT_MASK		0x100004
 #define HPRE_CTRL_CNT_CLR_CE_BIT	BIT(0)
 #define HPRE_COMM_CNT_CLR_CE		0x0
@@ -119,7 +118,6 @@ static struct hisi_qm_list hpre_devices = {
 };
 
 static const char * const hpre_debug_file_name[] = {
-	[HPRE_CURRENT_QM]   = "current_qm",
 	[HPRE_CLEAR_ENABLE] = "rdclr_en",
 	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
 };
@@ -226,41 +224,44 @@ static u32 vfs_num;
 module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
 MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
 
-struct hisi_qp *hpre_create_qp(void)
+struct hisi_qp *hpre_create_qp(u8 type)
 {
 	int node = cpu_to_node(smp_processor_id());
 	struct hisi_qp *qp = NULL;
 	int ret;
 
-	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp);
+	if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
+		return NULL;
+
+	/*
+	 * type: 0 - RSA/DH. algorithm supported in V2,
+	 *       1 - ECC algorithm in V3.
+	 */
+	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp);
 	if (!ret)
		return qp;
 
 	return NULL;
 }
 
-static void hpre_pasid_enable(struct hisi_qm *qm)
+static void hpre_config_pasid(struct hisi_qm *qm)
 {
-	u32 val;
-
-	val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
-	val |= BIT(HPRE_PASID_EN_BIT);
-	writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
-	val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
-	val |= BIT(HPRE_PASID_EN_BIT);
-	writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
-}
+	u32 val1, val2;
 
-static void hpre_pasid_disable(struct hisi_qm *qm)
-{
-	u32 val;
+	if (qm->ver >= QM_HW_V3)
+		return;
 
-	val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
-	val &= ~BIT(HPRE_PASID_EN_BIT);
-	writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG);
-	val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
-	val &= ~BIT(HPRE_PASID_EN_BIT);
-	writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG);
+	val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
+	val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
+	if (qm->use_sva) {
+		val1 |= BIT(HPRE_PASID_EN_BIT);
+		val2 |= BIT(HPRE_PASID_EN_BIT);
+	} else {
+		val1 &= ~BIT(HPRE_PASID_EN_BIT);
+		val2 &= ~BIT(HPRE_PASID_EN_BIT);
+	}
+	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
+	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
 }
 
 static int hpre_cfg_by_dsm(struct hisi_qm *qm)
@@ -320,7 +321,7 @@ static int hpre_set_cluster(struct hisi_qm *qm)
 }
 
 /*
- * For Kunpeng 920, we shoul disable FLR triggered by hardware (BME/PM/SRIOV).
+ * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
 * Or it may stay in D3 state when we bind and unbind hpre quickly,
 * as it does FLR triggered by hardware.
 */
@@ -383,15 +384,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
 	if (qm->ver == QM_HW_V2) {
 		ret = hpre_cfg_by_dsm(qm);
 		if (ret)
-			dev_err(dev, "acpi_evaluate_dsm err.\n");
+			return ret;
 
 		disable_flr_of_bme(qm);
-
-		/* Enable data buffer pasid */
-		if (qm->use_sva)
-			hpre_pasid_enable(qm);
 	}
 
+	/* Config data buffer pasid needed by Kunpeng 920 */
+	hpre_config_pasid(qm);
+
 	return ret;
 }
@@ -401,10 +401,6 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
 	unsigned long offset;
 	int i;
 
-	/* clear current_qm */
-	writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
-	writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
-
 	/* clear clusterX/cluster_ctrl */
 	for (i = 0; i < clusters_num; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
@@ -456,49 +452,6 @@ static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
 	return &hpre->qm;
 }
 
-static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
-{
-	struct hisi_qm *qm = hpre_file_to_qm(file);
-
-	return readl(qm->io_base + QM_DFX_MB_CNT_VF);
-}
-
-static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
-{
-	struct hisi_qm *qm = hpre_file_to_qm(file);
-	u32 num_vfs = qm->vfs_num;
-	u32 vfq_num, tmp;
-
-	if (val > num_vfs)
-		return -EINVAL;
-
-	/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
-	if (val == 0) {
-		qm->debug.curr_qm_qp_num = qm->qp_num;
-	} else {
-		vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
-		if (val == num_vfs) {
-			qm->debug.curr_qm_qp_num =
-			qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num;
-		} else {
-			qm->debug.curr_qm_qp_num = vfq_num;
-		}
-	}
-
-	writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
-	writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
-
-	tmp = val |
-	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
-	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
-
-	tmp = val |
-	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
-	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
-
-	return 0;
-}
-
 static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
 {
 	struct hisi_qm *qm = hpre_file_to_qm(file);
@@ -519,7 +472,7 @@ static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
 	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
 
-	return  0;
+	return 0;
 }
 
 static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
@@ -541,7 +494,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
 	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
 
-	return  0;
+	return 0;
 }
 
 static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
@@ -554,9 +507,6 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
 
 	spin_lock_irq(&file->lock);
 	switch (file->type) {
-	case HPRE_CURRENT_QM:
-		val = hpre_current_qm_read(file);
-		break;
 	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
@@ -597,11 +547,6 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
 
 	spin_lock_irq(&file->lock);
 	switch (file->type) {
-	case HPRE_CURRENT_QM:
-		ret = hpre_current_qm_write(file, val);
-		if (ret)
-			goto err_input;
-		break;
 	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
@@ -740,11 +685,6 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm)
 {
 	int ret;
 
-	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM,
-				       HPRE_CURRENT_QM);
-	if (ret)
-		return ret;
-
 	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
 	if (ret)
@@ -812,9 +752,9 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 	}
 
 	if (pdev->revision >= QM_HW_V3)
-		qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n";
+		qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2";
 	else
-		qm->algs = "rsa\ndh\n";
+		qm->algs = "rsa\ndh";
 	qm->mode = uacce_mode;
 	qm->pdev = pdev;
 	qm->ver = pdev->revision;
@@ -867,6 +807,20 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
	       HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
 }
 
+static void hpre_err_info_init(struct hisi_qm *qm)
+{
+	struct hisi_qm_err_info *err_info = &qm->err_info;
+
+	err_info->ce = QM_BASE_CE;
+	err_info->fe = 0;
+	err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+				   HPRE_OOO_ECC_2BIT_ERR;
+	err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE;
+	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
+	err_info->acpi_rst = "HRST";
+	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+}
+
 static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
@@ -875,16 +829,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.log_dev_hw_err		= hpre_log_hw_error,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
-	.err_info		= {
-		.ce			= QM_BASE_CE,
-		.nfe			= QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
-		.fe			= 0,
-		.ecc_2bits_mask		= HPRE_CORE_ECC_2BIT_ERR |
-					  HPRE_OOO_ECC_2BIT_ERR,
-		.dev_ce_mask		= HPRE_HAC_RAS_CE_ENABLE,
-		.msi_wr_port		= HPRE_WR_MSI_PORT,
-		.acpi_rst		= "HRST",
-	}
+	.err_info_init		= hpre_err_info_init,
 };
 
 static int hpre_pf_probe_init(struct hpre *hpre)
@@ -892,13 +837,12 @@ static int hpre_pf_probe_init(struct hpre *hpre)
 	struct hisi_qm *qm = &hpre->qm;
 	int ret;
 
-	qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2;
-
 	ret = hpre_set_user_domain_and_cache(qm);
 	if (ret)
		return ret;
 
 	qm->err_ini = &hpre_err_ini;
+	qm->err_ini->err_info_init(qm);
 	hisi_qm_dev_err_init(qm);
 
 	return 0;
@@ -1006,8 +950,6 @@ static void hpre_remove(struct pci_dev *pdev)
 	hisi_qm_stop(qm, QM_NORMAL);
 
 	if (qm->fun_type == QM_HW_PF) {
-		if (qm->use_sva && qm->ver == QM_HW_V2)
-			hpre_pasid_disable(qm);
		hpre_cnt_regs_clear(qm);
		qm->debug.curr_qm_qp_num = 0;
		hisi_qm_dev_err_uninit(qm);
@@ -1016,7 +958,6 @@ static void hpre_remove(struct pci_dev *pdev)
 	hisi_qm_uninit(qm);
 }
 
-
 static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hisi_qm_dev_err_detected,
	.slot_reset		= hisi_qm_dev_slot_reset,
@@ -1075,4 +1016,5 @@ module_exit(hpre_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
+MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
 MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
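The err_info change in this file is a common refactor: a static data member of an ops structure becomes an init callback, so each device generation fills in the fields at probe time. Reduced to its skeleton with illustrative types (not the real hisi_qm definitions):

struct err_info {
	unsigned int ce;
	unsigned int nfe;
};

struct err_ini {
	void (*err_info_init)(struct err_info *info);	/* was static data */
};

static void my_err_info_init(struct err_info *info)
{
	info->ce = 0x1;		/* values are illustrative only */
	info->nfe = 0x2;
}

static const struct err_ini my_err_ini = {
	.err_info_init = my_err_info_init,
};

/* at probe time: my_err_ini.err_info_init(&dev_err_info); */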

Some files were not shown because too many files have changed in this diff