diff --git a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml index 1d48ac712b23..a410d2cedde6 100644 --- a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml +++ b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml @@ -14,6 +14,7 @@ properties: enum: - ti,j721e-sa2ul - ti,am654-sa2ul + - ti,am64-sa2ul reg: maxItems: 1 @@ -45,6 +46,18 @@ properties: description: Address translation for the possible RNG child node for SA2UL + clocks: + items: + - description: Clock used by PKA + - description: Main Input Clock + - description: Clock used by rng + + clock-names: + items: + - const: pka_in_clk + - const: x1_clk + - const: x2_clk + patternProperties: "^rng@[a-f0-9]+$": type: object @@ -57,7 +70,16 @@ required: - power-domains - dmas - dma-names - - dma-coherent + +if: + properties: + compatible: + enum: + - ti,j721e-sa2ul + - ti,am654-sa2ul +then: + required: + - dma-coherent additionalProperties: false diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml b/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml index c147900f9041..6da674666d45 100644 --- a/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml +++ b/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml @@ -28,6 +28,12 @@ properties: clock-names: const: ipsec + resets: + maxItems: 1 + + reset-names: + const: ipsec + interrupts: maxItems: 1 @@ -35,6 +41,18 @@ required: - compatible - reg +if: + properties: + compatible: + enum: + - brcm,bcm6368-rng +then: + required: + - clocks + - clock-names + - resets + - reset-names + additionalProperties: false examples: @@ -58,4 +76,7 @@ examples: clocks = <&periph_clk 18>; clock-names = "ipsec"; + + resets = <&periph_rst 4>; + reset-names = "ipsec"; }; diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S index 472e56d09eea..1da3f41359aa 100644 --- a/arch/arm/crypto/aes-cipher-core.S +++ b/arch/arm/crypto/aes-cipher-core.S @@ -99,28 +99,6 @@ __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr .endm - .macro __rev, out, in - .if __LINUX_ARM_ARCH__ < 6 - lsl t0, \in, #24 - and t1, \in, #0xff00 - and t2, \in, #0xff0000 - orr \out, t0, \in, lsr #24 - orr \out, \out, t1, lsl #8 - orr \out, \out, t2, lsr #8 - .else - rev \out, \in - .endif - .endm - - .macro __adrl, out, sym, c - .if __LINUX_ARM_ARCH__ < 7 - ldr\c \out, =\sym - .else - movw\c \out, #:lower16:\sym - movt\c \out, #:upper16:\sym - .endif - .endm - .macro do_crypt, round, ttab, ltab, bsz push {r3-r11, lr} @@ -133,10 +111,10 @@ ldr r7, [in, #12] #ifdef CONFIG_CPU_BIG_ENDIAN - __rev r4, r4 - __rev r5, r5 - __rev r6, r6 - __rev r7, r7 + rev_l r4, t0 + rev_l r5, t0 + rev_l r6, t0 + rev_l r7, t0 #endif eor r4, r4, r8 @@ -144,7 +122,7 @@ eor r6, r6, r10 eor r7, r7, r11 - __adrl ttab, \ttab + mov_l ttab, \ttab /* * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into * L1 cache, assuming cacheline size >= 32. 
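For reference, the __rev macro removed above open-coded a 32-bit byte swap for pre-ARMv6 cores that lack the rev instruction; the patch switches to the shared rev_l assembler macro, which takes a single scratch register and uses rev on ARMv6+. A minimal C sketch of what the pre-v6 shift/mask fallback computes (illustrative names, not kernel code):

#include <stdint.h>

/* C equivalent of the removed __rev assembly sequence: reverse the
 * four bytes of a 32-bit word using only shifts, ANDs and ORs. */
static inline uint32_t rev32_sketch(uint32_t in)
{
	uint32_t t0 = in << 24;		/* byte 0 -> byte 3 */
	uint32_t t1 = in & 0xff00;	/* isolate byte 1 */
	uint32_t t2 = in & 0xff0000;	/* isolate byte 2 */
	uint32_t out;

	out = t0 | (in >> 24);		/* byte 3 -> byte 0 */
	out |= t1 << 8;			/* byte 1 -> byte 2 */
	out |= t2 >> 8;			/* byte 2 -> byte 1 */
	return out;
}

For example, rev32_sketch(0xAABBCCDD) yields 0xDDCCBBAA, matching the six-instruction sequence the old macro emitted.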
This is a hardening measure @@ -180,7 +158,7 @@ 2: .ifb \ltab add ttab, ttab, #1 .else - __adrl ttab, \ltab + mov_l ttab, \ltab // Prefetch inverse S-box for final round; see explanation above .set i, 0 .rept 256 / 64 @@ -194,10 +172,10 @@ \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds #ifdef CONFIG_CPU_BIG_ENDIAN - __rev r4, r4 - __rev r5, r5 - __rev r6, r6 - __rev r7, r7 + rev_l r4, t0 + rev_l r5, t0 + rev_l r6, t0 + rev_l r7, t0 #endif ldr out, [sp] diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c index 34d73200e7fa..4b59d027ba4a 100644 --- a/arch/arm/crypto/blake2b-neon-glue.c +++ b/arch/arm/crypto/blake2b-neon-glue.c @@ -85,8 +85,8 @@ static int __init blake2b_neon_mod_init(void) static void __exit blake2b_neon_mod_exit(void) { - return crypto_unregister_shashes(blake2b_neon_algs, - ARRAY_SIZE(blake2b_neon_algs)); + crypto_unregister_shashes(blake2b_neon_algs, + ARRAY_SIZE(blake2b_neon_algs)); } module_init(blake2b_neon_mod_init); diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S index bed897e9a181..86345751bbf3 100644 --- a/arch/arm/crypto/blake2s-core.S +++ b/arch/arm/crypto/blake2s-core.S @@ -8,6 +8,7 @@ */ #include +#include // Registers used to hold message words temporarily. There aren't // enough ARM registers to hold the whole message block, so we have to @@ -38,6 +39,23 @@ #endif .endm +.macro _le32_bswap a, tmp +#ifdef __ARMEB__ + rev_l \a, \tmp +#endif +.endm + +.macro _le32_bswap_8x a, b, c, d, e, f, g, h, tmp + _le32_bswap \a, \tmp + _le32_bswap \b, \tmp + _le32_bswap \c, \tmp + _le32_bswap \d, \tmp + _le32_bswap \e, \tmp + _le32_bswap \f, \tmp + _le32_bswap \g, \tmp + _le32_bswap \h, \tmp +.endm + // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals. // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two // columns/diagonals. 
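For orientation, the quarter-round named above is the standard BLAKE2s mixing function G (RFC 7693, section 3.1); the word-offset plumbing described next selects its two message-word inputs. A self-contained C sketch with illustrative names, assuming nothing beyond the RFC:

#include <stdint.h>

static inline uint32_t ror32(uint32_t v, unsigned int n)
{
	return (v >> n) | (v << (32 - n));
}

/* BLAKE2s G function (RFC 7693): mix one column or diagonal
 * (a, b, c, d) of the state with two message words x and y. */
static void blake2s_g_sketch(uint32_t *a, uint32_t *b, uint32_t *c,
			     uint32_t *d, uint32_t x, uint32_t y)
{
	*a += *b + x;
	*d = ror32(*d ^ *a, 16);
	*c += *d;
	*b = ror32(*b ^ *c, 12);
	*a += *b + y;
	*d = ror32(*d ^ *a, 8);
	*c += *d;
	*b = ror32(*b ^ *c, 7);
}

The assembly macro interleaves two such G evaluations (two columns or two diagonals) to make better use of the register file.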
s0-s1 are the word offsets to the message words the first @@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch) tst r1, #3 bne .Lcopy_block_misaligned ldmia r1!, {r2-r9} + _le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14 stmia r12!, {r2-r9} ldmia r1!, {r2-r9} + _le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14 stmia r12, {r2-r9} .Lcopy_block_done: str r1, [sp, #68] // Update message pointer @@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch) 1: #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ldr r3, [r1], #4 + _le32_bswap r3, r4 #else ldrb r3, [r1, #0] ldrb r4, [r1, #1] diff --git a/arch/arm/crypto/chacha-scalar-core.S b/arch/arm/crypto/chacha-scalar-core.S index 2985b80a45b5..083fe1ab96d0 100644 --- a/arch/arm/crypto/chacha-scalar-core.S +++ b/arch/arm/crypto/chacha-scalar-core.S @@ -41,32 +41,15 @@ X14 .req r12 X15 .req r14 -.macro __rev out, in, t0, t1, t2 -.if __LINUX_ARM_ARCH__ >= 6 - rev \out, \in -.else - lsl \t0, \in, #24 - and \t1, \in, #0xff00 - and \t2, \in, #0xff0000 - orr \out, \t0, \in, lsr #24 - orr \out, \out, \t1, lsl #8 - orr \out, \out, \t2, lsr #8 -.endif -.endm - -.macro _le32_bswap x, t0, t1, t2 +.macro _le32_bswap_4x a, b, c, d, tmp #ifdef __ARMEB__ - __rev \x, \x, \t0, \t1, \t2 + rev_l \a, \tmp + rev_l \b, \tmp + rev_l \c, \tmp + rev_l \d, \tmp #endif .endm -.macro _le32_bswap_4x a, b, c, d, t0, t1, t2 - _le32_bswap \a, \t0, \t1, \t2 - _le32_bswap \b, \t0, \t1, \t2 - _le32_bswap \c, \t0, \t1, \t2 - _le32_bswap \d, \t0, \t1, \t2 -.endm - .macro __ldrd a, b, src, offset #if __LINUX_ARM_ARCH__ >= 6 ldrd \a, \b, [\src, #\offset] @@ -200,7 +183,7 @@ add X1, X1, r9 add X2, X2, r10 add X3, X3, r11 - _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + _le32_bswap_4x X0, X1, X2, X3, r8 ldmia r12!, {r8-r11} eor X0, X0, r8 eor X1, X1, r9 @@ -216,7 +199,7 @@ ldmia r12!, {X0-X3} add X6, r10, X6, ror #brot add X7, r11, X7, ror #brot - _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + _le32_bswap_4x X4, X5, X6, X7, r8 eor X4, X4, X0 eor X5, X5, X1 eor X6, X6, X2 @@ -231,7 +214,7 @@ add r1, r1, r9 // x9 add r6, r6, r10 // x10 add r7, r7, r11 // x11 - _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + _le32_bswap_4x r0, r1, r6, r7, r8 ldmia r12!, {r8-r11} eor r0, r0, r8 // x8 eor r1, r1, r9 // x9 @@ -245,7 +228,7 @@ add r3, r9, r3, ror #drot // x13 add r4, r10, r4, ror #drot // x14 add r5, r11, r5, ror #drot // x15 - _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + _le32_bswap_4x r2, r3, r4, r5, r9 ldr r9, [sp, #72] // load LEN eor r2, r2, r0 // x12 eor r3, r3, r1 // x13 @@ -301,7 +284,7 @@ add X1, X1, r9 add X2, X2, r10 add X3, X3, r11 - _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + _le32_bswap_4x X0, X1, X2, X3, r8 stmia r14!, {X0-X3} // Save keystream for x4-x7 @@ -311,7 +294,7 @@ add X5, r9, X5, ror #brot add X6, r10, X6, ror #brot add X7, r11, X7, ror #brot - _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + _le32_bswap_4x X4, X5, X6, X7, r8 add r8, sp, #64 stmia r14!, {X4-X7} @@ -323,7 +306,7 @@ add r1, r1, r9 // x9 add r6, r6, r10 // x10 add r7, r7, r11 // x11 - _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + _le32_bswap_4x r0, r1, r6, r7, r8 stmia r14!, {r0,r1,r6,r7} __ldrd r8, r9, sp, 144 __ldrd r10, r11, sp, 152 @@ -331,7 +314,7 @@ add r3, r9, r3, ror #drot // x13 add r4, r10, r4, ror #drot // x14 add r5, r11, r5, ror #drot // x15 - _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + _le32_bswap_4x r2, r3, r4, r5, r9 stmia r14, {r2-r5} // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S index be18af52e7dc..b697fa5d059a 100644 --- 
a/arch/arm/crypto/curve25519-core.S +++ b/arch/arm/crypto/curve25519-core.S @@ -10,8 +10,8 @@ #include .text -.fpu neon .arch armv7-a +.fpu neon .align 4 ENTRY(curve25519_neon) diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c index 3023c1acfa19..c31bd8f7c092 100644 --- a/arch/arm/crypto/poly1305-glue.c +++ b/arch/arm/crypto/poly1305-glue.c @@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit) static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) { poly1305_init_arm(&dctx->h, key); dctx->s[0] = get_unaligned_le32(key + 16); diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index bbdb54702aa7..247011356d11 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -359,6 +359,7 @@ ST5( mov v4.16b, vctr.16b ) ins vctr.d[0], x8 /* apply carry to N counter blocks for N := x12 */ + cbz x12, 2f adr x16, 1f sub x16, x16, x12, lsl #3 br x16 diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c index 683de671741a..9c3d86e397bf 100644 --- a/arch/arm64/crypto/poly1305-glue.c +++ b/arch/arm64/crypto/poly1305-glue.c @@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce); static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) { poly1305_init_arm64(&dctx->h, key); dctx->s[0] = get_unaligned_le32(key + 16); diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c index fc881b46d911..bc6110fb98e0 100644 --- a/arch/mips/crypto/poly1305-glue.c +++ b/arch/mips/crypto/poly1305-glue.c @@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key); asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit); asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce); -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) { poly1305_init_mips(&dctx->h, key); dctx->s[0] = get_unaligned_le32(key + 16); diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c index b1e577cbf00c..88e8ea73bfa7 100644 --- a/arch/powerpc/crypto/sha1-spe-glue.c +++ b/arch/powerpc/crypto/sha1-spe-glue.c @@ -107,7 +107,7 @@ static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data, src += bytes; len -= bytes; - }; + } memcpy((char *)sctx->buffer, src, len); return 0; diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index 646da46e8d10..1dfb8af48a3c 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -16,7 +16,7 @@ #include asmlinkage void poly1305_init_x86_64(void *ctx, - const u8 key[POLY1305_KEY_SIZE]); + const u8 key[POLY1305_BLOCK_SIZE]); asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, const size_t len, const u32 padbit); asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], @@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx) state->is_base2_26 = 0; } -static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE]) +static void poly1305_simd_init(void *ctx, const u8 
key[POLY1305_BLOCK_SIZE]) { poly1305_init_x86_64(ctx, key); } @@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], poly1305_emit_avx(ctx, mac, nonce); } -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) +void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) { poly1305_simd_init(&dctx->h, key); dctx->s[0] = get_unaligned_le32(&key[16]); diff --git a/crypto/Kconfig b/crypto/Kconfig index 5809cc198fa7..ca3b02dcbbfa 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -242,6 +242,16 @@ config CRYPTO_ECDH help Generic implementation of the ECDH algorithm +config CRYPTO_ECDSA + tristate "ECDSA (NIST P192, P256 etc.) algorithm" + select CRYPTO_ECC + select CRYPTO_AKCIPHER + select ASN1 + help + Elliptic Curve Digital Signature Algorithm (NIST P192, P256 etc.) + is a NIST cryptographic standard algorithm. Only signature verification + is implemented. + config CRYPTO_ECRDSA tristate "EC-RDSA (GOST 34.10) algorithm" select CRYPTO_ECC @@ -1213,7 +1223,6 @@ config CRYPTO_BLOWFISH_X86_64 config CRYPTO_CAMELLIA tristate "Camellia cipher algorithms" - depends on CRYPTO select CRYPTO_ALGAPI help Camellia cipher algorithms module. @@ -1229,7 +1238,6 @@ config CRYPTO_CAMELLIA config CRYPTO_CAMELLIA_X86_64 tristate "Camellia cipher algorithm (x86_64)" depends on X86 && 64BIT - depends on CRYPTO select CRYPTO_SKCIPHER imply CRYPTO_CTR help @@ -1246,7 +1254,6 @@ config CRYPTO_CAMELLIA_X86_64 config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX)" depends on X86 && 64BIT - depends on CRYPTO select CRYPTO_SKCIPHER select CRYPTO_CAMELLIA_X86_64 select CRYPTO_SIMD @@ -1265,7 +1272,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)" depends on X86 && 64BIT - depends on CRYPTO select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 help Camellia cipher algorithm module (x86_64/AES-NI/AVX2).
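The new CRYPTO_ECDSA entry above is backed by an ASN.1 description that the kernel's asn1_compiler turns into the parser wired up in the crypto/Makefile hunk a little further down. Judging from the ecdsa_get_signature_r/s callbacks in ecdsa.c, ecdsasignature.asn1 is presumably along these lines (a sketch inferred from those callback names, not copied from the tree):

ECDSASignature ::= SEQUENCE {
	r	INTEGER ({ ecdsa_get_signature_r }),
	s	INTEGER ({ ecdsa_get_signature_s })
}

Each action in braces names the C callback the generated decoder invokes when it reaches that element.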
@@ -1281,7 +1287,6 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 config CRYPTO_CAMELLIA_SPARC64 tristate "Camellia cipher algorithm (SPARC64)" depends on SPARC64 - depends on CRYPTO select CRYPTO_ALGAPI select CRYPTO_SKCIPHER help diff --git a/crypto/Makefile b/crypto/Makefile index cf23affb1678..10526d4559b8 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -50,6 +50,12 @@ sm2_generic-y += sm2.o obj-$(CONFIG_CRYPTO_SM2) += sm2_generic.o +$(obj)/ecdsasignature.asn1.o: $(obj)/ecdsasignature.asn1.c $(obj)/ecdsasignature.asn1.h +$(obj)/ecdsa.o: $(obj)/ecdsasignature.asn1.h +ecdsa_generic-y += ecdsa.o +ecdsa_generic-y += ecdsasignature.asn1.o +obj-$(CONFIG_CRYPTO_ECDSA) += ecdsa_generic.o + crypto_acompress-y := acompress.o crypto_acompress-y += scompress.o obj-$(CONFIG_CRYPTO_ACOMP2) += crypto_acompress.o diff --git a/crypto/aegis.h b/crypto/aegis.h index 6920ebe77679..6ef9c174c973 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -21,9 +21,28 @@ union aegis_block { u8 bytes[AEGIS_BLOCK_SIZE]; }; +struct aegis_state; + +extern int aegis128_have_aes_insn; + #define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block)) #define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN) +bool crypto_aegis128_have_simd(void); +void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); +void crypto_aegis128_init_simd(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv); +void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +int crypto_aegis128_final_simd(struct aegis_state *state, + union aegis_block *tag_xor, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize); + static __always_inline void crypto_aegis_block_xor(union aegis_block *dst, const union aegis_block *src) { diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index 89dc1c559689..c4f1bfa1d04f 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -58,21 +58,6 @@ static bool aegis128_do_simd(void) return false; } -bool crypto_aegis128_have_simd(void); -void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); -void crypto_aegis128_init_simd(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv); -void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -int crypto_aegis128_final_simd(struct aegis_state *state, - union aegis_block *tag_xor, - unsigned int assoclen, - unsigned int cryptlen, - unsigned int authsize); - static void crypto_aegis128_update(struct aegis_state *state) { union aegis_block tmp; diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c index 94d591a002a4..a7856915ec85 100644 --- a/crypto/aegis128-neon.c +++ b/crypto/aegis128-neon.c @@ -30,7 +30,7 @@ bool crypto_aegis128_have_simd(void) return IS_ENABLED(CONFIG_ARM64); } -void crypto_aegis128_init_simd(union aegis_block *state, +void crypto_aegis128_init_simd(struct aegis_state *state, const union aegis_block *key, const u8 *iv) { @@ -39,14 +39,14 @@ void crypto_aegis128_init_simd(union aegis_block *state, kernel_neon_end(); } -void crypto_aegis128_update_simd(union aegis_block *state, const void *msg) +void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg) { kernel_neon_begin(); 
crypto_aegis128_update_neon(state, msg); kernel_neon_end(); } -void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, +void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { kernel_neon_begin(); @@ -54,7 +54,7 @@ void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, kernel_neon_end(); } -void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, +void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { kernel_neon_begin(); @@ -62,7 +62,7 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, kernel_neon_end(); } -int crypto_aegis128_final_simd(union aegis_block *state, +int crypto_aegis128_final_simd(struct aegis_state *state, union aegis_block *tag_xor, unsigned int assoclen, unsigned int cryptlen, diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 9acb9d2c4bcf..18cc82dc4a42 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -491,8 +491,8 @@ static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) /** * af_alg_alloc_tsgl - allocate the TX SGL * - * @sk socket of connection to user space - * @return: 0 upon success, < 0 upon error + * @sk: socket of connection to user space + * Return: 0 upon success, < 0 upon error */ static int af_alg_alloc_tsgl(struct sock *sk) { @@ -525,15 +525,15 @@ static int af_alg_alloc_tsgl(struct sock *sk) } /** - * aead_count_tsgl - Count number of TX SG entries + * af_alg_count_tsgl - Count number of TX SG entries * * The counting starts from the beginning of the SGL to @bytes. If - * an offset is provided, the counting of the SG entries starts at the offset. + * an @offset is provided, the counting of the SG entries starts at the @offset. * - * @sk socket of connection to user space - * @bytes Count the number of SG entries holding given number of bytes. - * @offset Start the counting of SG entries from the given offset. - * @return Number of TX SG entries found given the constraints + * @sk: socket of connection to user space + * @bytes: Count the number of SG entries holding given number of bytes. + * @offset: Start the counting of SG entries from the given offset. + * Return: Number of TX SG entries found given the constraints */ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) { @@ -577,19 +577,19 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) EXPORT_SYMBOL_GPL(af_alg_count_tsgl); /** - * aead_pull_tsgl - Release the specified buffers from TX SGL + * af_alg_pull_tsgl - Release the specified buffers from TX SGL * - * If @dst is non-null, reassign the pages to dst. The caller must release + * If @dst is non-null, reassign the pages to @dst. The caller must release * the pages. If @dst_offset is given only reassign the pages to @dst starting * at the @dst_offset (byte). The caller must ensure that @dst is large * enough (e.g. by using af_alg_count_tsgl with the same offset). * - * @sk socket of connection to user space - * @used Number of bytes to pull from TX SGL - * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The - * caller must release the buffers in dst. - * @dst_offset Reassign the TX SGL from given offset. All buffers before - * reaching the offset is released. + * @sk: socket of connection to user space + * @used: Number of bytes to pull from TX SGL + * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. 
The + * caller must release the buffers in dst. + * @dst_offset: Reassign the TX SGL from given offset. All buffers before + * reaching the offset is released. */ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset) @@ -657,7 +657,7 @@ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); /** * af_alg_free_areq_sgls - Release TX and RX SGLs of the request * - * @areq Request holding the TX and RX SGL + * @areq: Request holding the TX and RX SGL */ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq) { @@ -692,9 +692,9 @@ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq) /** * af_alg_wait_for_wmem - wait for availability of writable memory * - * @sk socket of connection to user space - * @flags If MSG_DONTWAIT is set, then only report if function would sleep - * @return 0 when writable memory is available, < 0 upon error + * @sk: socket of connection to user space + * @flags: If MSG_DONTWAIT is set, then only report if function would sleep + * Return: 0 when writable memory is available, < 0 upon error */ static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) { @@ -725,7 +725,7 @@ static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) /** * af_alg_wmem_wakeup - wakeup caller when writable memory is available * - * @sk socket of connection to user space + * @sk: socket of connection to user space */ void af_alg_wmem_wakeup(struct sock *sk) { @@ -748,10 +748,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); /** * af_alg_wait_for_data - wait for availability of TX data * - * @sk socket of connection to user space - * @flags If MSG_DONTWAIT is set, then only report if function would sleep - * @min Set to minimum request size if partial requests are allowed. - * @return 0 when writable memory is available, < 0 upon error + * @sk: socket of connection to user space + * @flags: If MSG_DONTWAIT is set, then only report if function would sleep + * @min: Set to minimum request size if partial requests are allowed. + * Return: 0 when writable memory is available, < 0 upon error */ int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { @@ -790,7 +790,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data); /** * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel * - * @sk socket of connection to user space + * @sk: socket of connection to user space */ static void af_alg_data_wakeup(struct sock *sk) { @@ -820,12 +820,12 @@ static void af_alg_data_wakeup(struct sock *sk) * * In addition, the ctx is filled with the information sent via CMSG. 
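The af_alg.c hunks in this series are all kernel-doc normalizations: parameter lines take the form '@name:' with a trailing colon, and return values move from an ad-hoc '@return' tag into a 'Return:' section. A minimal sketch of the target shape, using a hypothetical function (not part of the patch):

#include <linux/types.h>

/**
 * example_count_nonzero - count non-zero bytes in a buffer
 * @buf: buffer to scan
 * @len: number of bytes in @buf
 *
 * Return: number of non-zero bytes found in @buf
 */
static size_t example_count_nonzero(const u8 *buf, size_t len)
{
	size_t n = 0;

	while (len--)
		n += *buf++ != 0;
	return n;
}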
* - * @sock socket of connection to user space - * @msg message from user space - * @size size of message from user space - * @ivsize the size of the IV for the cipher operation to verify that the + * @sock: socket of connection to user space + * @msg: message from user space + * @size: size of message from user space + * @ivsize: the size of the IV for the cipher operation to verify that the * user-space-provided IV has the right size - * @return the number of copied data upon success, < 0 upon error + * Return: the number of copied data upon success, < 0 upon error */ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize) @@ -977,6 +977,11 @@ EXPORT_SYMBOL_GPL(af_alg_sendmsg); /** * af_alg_sendpage - sendpage system call handler + * @sock: socket of connection to user space to write to + * @page: data to send + * @offset: offset into page to begin sending + * @size: length of data + * @flags: message send/receive flags * * This is a generic implementation of sendpage to fill ctx->tsgl_list. */ @@ -1035,6 +1040,7 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage); /** * af_alg_free_resources - release resources required for crypto request + * @areq: Request holding the TX and RX SGL */ void af_alg_free_resources(struct af_alg_async_req *areq) { @@ -1047,6 +1053,9 @@ EXPORT_SYMBOL_GPL(af_alg_free_resources); /** * af_alg_async_cb - AIO callback handler + * @_req: async request info + * @err: if non-zero, error result to be returned via ki_complete(); + * otherwise return the AIO output length via ki_complete(). * * This handler cleans up the struct af_alg_async_req upon completion of the * AIO operation. @@ -1073,6 +1082,9 @@ EXPORT_SYMBOL_GPL(af_alg_async_cb); /** * af_alg_poll - poll system call handler + * @file: file pointer + * @sock: socket to poll + * @wait: poll_table */ __poll_t af_alg_poll(struct file *file, struct socket *sock, poll_table *wait) @@ -1098,9 +1110,9 @@ EXPORT_SYMBOL_GPL(af_alg_poll); /** * af_alg_alloc_areq - allocate struct af_alg_async_req * - * @sk socket of connection to user space - * @areqlen size of struct af_alg_async_req + crypto_*_reqsize - * @return allocated data structure or ERR_PTR upon error + * @sk: socket of connection to user space + * @areqlen: size of struct af_alg_async_req + crypto_*_reqsize + * Return: allocated data structure or ERR_PTR upon error */ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen) @@ -1125,13 +1137,13 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_areq); * af_alg_get_rsgl - create the RX SGL for the output data from the crypto * operation * - * @sk socket of connection to user space - * @msg user space message - * @flags flags used to invoke recvmsg with - * @areq instance of the cryptographic request that will hold the RX SGL - * @maxsize maximum number of bytes to be pulled from user space - * @outlen number of bytes in the RX SGL - * @return 0 on success, < 0 upon error + * @sk: socket of connection to user space + * @msg: user space message + * @flags: flags used to invoke recvmsg with + * @areq: instance of the cryptographic request that will hold the RX SGL + * @maxsize: maximum number of bytes to be pulled from user space + * @outlen: number of bytes in the RX SGL + * Return: 0 on success, < 0 upon error */ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, struct af_alg_async_req *areq, size_t maxsize, diff --git a/crypto/api.c b/crypto/api.c index ed08cbd5b9d3..c4eda56cff89 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -562,7 +562,7 @@ void 
crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) { struct crypto_alg *alg; - if (unlikely(!mem)) + if (IS_ERR_OR_NULL(mem)) return; alg = tfm->__crt_alg; diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 788a4ba1e2e7..4fefb219bfdc 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -85,7 +86,8 @@ int software_key_determine_akcipher(const char *encoding, return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0; } - if (strcmp(encoding, "raw") == 0) { + if (strcmp(encoding, "raw") == 0 || + strcmp(encoding, "x962") == 0) { strcpy(alg_name, pkey->pkey_algo); return 0; } diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 52c9b455fc7d..6d003096b5bc 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -227,6 +227,26 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, ctx->cert->sig->hash_algo = "sha224"; goto rsa_pkcs1; + case OID_id_ecdsa_with_sha1: + ctx->cert->sig->hash_algo = "sha1"; + goto ecdsa; + + case OID_id_ecdsa_with_sha224: + ctx->cert->sig->hash_algo = "sha224"; + goto ecdsa; + + case OID_id_ecdsa_with_sha256: + ctx->cert->sig->hash_algo = "sha256"; + goto ecdsa; + + case OID_id_ecdsa_with_sha384: + ctx->cert->sig->hash_algo = "sha384"; + goto ecdsa; + + case OID_id_ecdsa_with_sha512: + ctx->cert->sig->hash_algo = "sha512"; + goto ecdsa; + case OID_gost2012Signature256: ctx->cert->sig->hash_algo = "streebog256"; goto ecrdsa; @@ -255,6 +275,11 @@ int x509_note_pkey_algo(void *context, size_t hdrlen, ctx->cert->sig->encoding = "raw"; ctx->algo_oid = ctx->last_oid; return 0; +ecdsa: + ctx->cert->sig->pkey_algo = "ecdsa"; + ctx->cert->sig->encoding = "x962"; + ctx->algo_oid = ctx->last_oid; + return 0; } /* @@ -276,7 +301,8 @@ int x509_note_signature(void *context, size_t hdrlen, if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 || strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 || - strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0) { + strcmp(ctx->cert->sig->pkey_algo, "sm2") == 0 || + strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) { /* Discard the BIT STRING metadata */ if (vlen < 1 || *(const u8 *)value != 0) return -EBADMSG; @@ -459,6 +485,7 @@ int x509_extract_key_data(void *context, size_t hdrlen, const void *value, size_t vlen) { struct x509_parse_context *ctx = context; + enum OID oid; ctx->key_algo = ctx->last_oid; switch (ctx->last_oid) { @@ -470,7 +497,25 @@ int x509_extract_key_data(void *context, size_t hdrlen, ctx->cert->pub->pkey_algo = "ecrdsa"; break; case OID_id_ecPublicKey: - ctx->cert->pub->pkey_algo = "sm2"; + if (parse_OID(ctx->params, ctx->params_size, &oid) != 0) + return -EBADMSG; + + switch (oid) { + case OID_sm2: + ctx->cert->pub->pkey_algo = "sm2"; + break; + case OID_id_prime192v1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p192"; + break; + case OID_id_prime256v1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p256"; + break; + case OID_id_ansip384r1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p384"; + break; + default: + return -ENOPKG; + } break; default: return -ENOPKG; diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index ae450eb8be14..3d45161b271a 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -129,7 +129,9 @@ int x509_check_for_self_signed(struct x509_certificate *cert) } ret = 
-EKEYREJECTED; - if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0 + if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0 && + (strncmp(cert->pub->pkey_algo, "ecdsa-", 6) != 0 || + strcmp(cert->sig->pkey_algo, "ecdsa") != 0)) goto out; ret = public_key_verify_signature(cert->pub, cert->sig); diff --git a/crypto/crc32_generic.c b/crypto/crc32_generic.c index 0e103fb5dd77..a989cb44fd16 100644 --- a/crypto/crc32_generic.c +++ b/crypto/crc32_generic.c @@ -1,26 +1,4 @@ -/* GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see http://www.gnu.org/licenses - * - * Please visit http://www.xyratex.com/contact if you need additional - * information or have any questions. - * - * GPL HEADER END - */ - +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2012 Xyratex Technology Limited */ diff --git a/crypto/ecc.c b/crypto/ecc.c index c80aa25994a0..afc6cefdc1d9 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -24,6 +24,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include #include #include #include @@ -42,7 +43,14 @@ typedef struct { u64 m_high; } uint128_t; -static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id) +/* Returns curve25519 curve param */ +const struct ecc_curve *ecc_get_curve25519(void) +{ + return &ecc_25519; +} +EXPORT_SYMBOL(ecc_get_curve25519); + +const struct ecc_curve *ecc_get_curve(unsigned int curve_id) { switch (curve_id) { /* In FIPS mode only allow P256 and higher */ @@ -50,10 +58,13 @@ static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id) return fips_enabled ? NULL : &nist_p192; case ECC_CURVE_NIST_P256: return &nist_p256; + case ECC_CURVE_NIST_P384: + return &nist_p384; default: return NULL; } } +EXPORT_SYMBOL(ecc_get_curve); static u64 *ecc_alloc_digits_space(unsigned int ndigits) { @@ -128,7 +139,7 @@ bool vli_is_zero(const u64 *vli, unsigned int ndigits) } EXPORT_SYMBOL(vli_is_zero); -/* Returns nonzero if bit bit of vli is set. */ +/* Returns nonzero if the given bit of vli is set.
*/ static u64 vli_test_bit(const u64 *vli, unsigned int bit) { return (vli[bit / 64] & ((u64)1 << (bit % 64))); @@ -775,18 +786,133 @@ static void vli_mmod_fast_256(u64 *result, const u64 *product, } } +#define SL32OR32(x32, y32) (((u64)x32 << 32) | y32) +#define AND64H(x64) (x64 & 0xffFFffFF00000000ull) +#define AND64L(x64) (x64 & 0x00000000ffFFffFFull) + +/* Computes result = product % curve_prime + * from "Mathematical routines for the NIST prime elliptic curves" + */ +static void vli_mmod_fast_384(u64 *result, const u64 *product, + const u64 *curve_prime, u64 *tmp) +{ + int carry; + const unsigned int ndigits = 6; + + /* t */ + vli_set(result, product, ndigits); + + /* s1 */ + tmp[0] = 0; // 0 || 0 + tmp[1] = 0; // 0 || 0 + tmp[2] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 + tmp[3] = product[11]>>32; // 0 ||a23 + tmp[4] = 0; // 0 || 0 + tmp[5] = 0; // 0 || 0 + carry = vli_lshift(tmp, tmp, 1, ndigits); + carry += vli_add(result, result, tmp, ndigits); + + /* s2 */ + tmp[0] = product[6]; //a13||a12 + tmp[1] = product[7]; //a15||a14 + tmp[2] = product[8]; //a17||a16 + tmp[3] = product[9]; //a19||a18 + tmp[4] = product[10]; //a21||a20 + tmp[5] = product[11]; //a23||a22 + carry += vli_add(result, result, tmp, ndigits); + + /* s3 */ + tmp[0] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 + tmp[1] = SL32OR32(product[6], (product[11]>>32)); //a12||a23 + tmp[2] = SL32OR32(product[7], (product[6])>>32); //a14||a13 + tmp[3] = SL32OR32(product[8], (product[7]>>32)); //a16||a15 + tmp[4] = SL32OR32(product[9], (product[8]>>32)); //a18||a17 + tmp[5] = SL32OR32(product[10], (product[9]>>32)); //a20||a19 + carry += vli_add(result, result, tmp, ndigits); + + /* s4 */ + tmp[0] = AND64H(product[11]); //a23|| 0 + tmp[1] = (product[10]<<32); //a20|| 0 + tmp[2] = product[6]; //a13||a12 + tmp[3] = product[7]; //a15||a14 + tmp[4] = product[8]; //a17||a16 + tmp[5] = product[9]; //a19||a18 + carry += vli_add(result, result, tmp, ndigits); + + /* s5 */ + tmp[0] = 0; // 0|| 0 + tmp[1] = 0; // 0|| 0 + tmp[2] = product[10]; //a21||a20 + tmp[3] = product[11]; //a23||a22 + tmp[4] = 0; // 0|| 0 + tmp[5] = 0; // 0|| 0 + carry += vli_add(result, result, tmp, ndigits); + + /* s6 */ + tmp[0] = AND64L(product[10]); // 0 ||a20 + tmp[1] = AND64H(product[10]); //a21|| 0 + tmp[2] = product[11]; //a23||a22 + tmp[3] = 0; // 0 || 0 + tmp[4] = 0; // 0 || 0 + tmp[5] = 0; // 0 || 0 + carry += vli_add(result, result, tmp, ndigits); + + /* d1 */ + tmp[0] = SL32OR32(product[6], (product[11]>>32)); //a12||a23 + tmp[1] = SL32OR32(product[7], (product[6]>>32)); //a14||a13 + tmp[2] = SL32OR32(product[8], (product[7]>>32)); //a16||a15 + tmp[3] = SL32OR32(product[9], (product[8]>>32)); //a18||a17 + tmp[4] = SL32OR32(product[10], (product[9]>>32)); //a20||a19 + tmp[5] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 + carry -= vli_sub(result, result, tmp, ndigits); + + /* d2 */ + tmp[0] = (product[10]<<32); //a20|| 0 + tmp[1] = SL32OR32(product[11], (product[10]>>32)); //a22||a21 + tmp[2] = (product[11]>>32); // 0 ||a23 + tmp[3] = 0; // 0 || 0 + tmp[4] = 0; // 0 || 0 + tmp[5] = 0; // 0 || 0 + carry -= vli_sub(result, result, tmp, ndigits); + + /* d3 */ + tmp[0] = 0; // 0 || 0 + tmp[1] = AND64H(product[11]); //a23|| 0 + tmp[2] = product[11]>>32; // 0 ||a23 + tmp[3] = 0; // 0 || 0 + tmp[4] = 0; // 0 || 0 + tmp[5] = 0; // 0 || 0 + carry -= vli_sub(result, result, tmp, ndigits); + + if (carry < 0) { + do { + carry += vli_add(result, result, curve_prime, ndigits); + } while (carry < 0); + } else { + while (carry || 
vli_cmp(curve_prime, result, ndigits) != 1) + carry -= vli_sub(result, result, curve_prime, ndigits); + } + +} + +#undef SL32OR32 +#undef AND64H +#undef AND64L + /* Computes result = product % curve_prime for different curve_primes. * * Note that curve_primes are distinguished just by heuristic check and * not by complete conformance check. */ static bool vli_mmod_fast(u64 *result, u64 *product, - const u64 *curve_prime, unsigned int ndigits) + const struct ecc_curve *curve) { u64 tmp[2 * ECC_MAX_DIGITS]; + const u64 *curve_prime = curve->p; + const unsigned int ndigits = curve->g.ndigits; - /* Currently, both NIST primes have -1 in lowest qword. */ - if (curve_prime[0] != -1ull) { + /* All NIST curves have name prefix 'nist_' */ + if (strncmp(curve->name, "nist_", 5) != 0) { /* Try to handle Pseudo-Marsenne primes. */ if (curve_prime[ndigits - 1] == -1ull) { vli_mmod_special(result, product, curve_prime, @@ -809,6 +935,9 @@ static bool vli_mmod_fast(u64 *result, u64 *product, case 4: vli_mmod_fast_256(result, product, curve_prime, tmp); break; + case 6: + vli_mmod_fast_384(result, product, curve_prime, tmp); + break; default: pr_err_ratelimited("ecc: unsupported digits size!\n"); return false; @@ -832,22 +961,22 @@ EXPORT_SYMBOL(vli_mod_mult_slow); /* Computes result = (left * right) % curve_prime. */ static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right, - const u64 *curve_prime, unsigned int ndigits) + const struct ecc_curve *curve) { u64 product[2 * ECC_MAX_DIGITS]; - vli_mult(product, left, right, ndigits); - vli_mmod_fast(result, product, curve_prime, ndigits); + vli_mult(product, left, right, curve->g.ndigits); + vli_mmod_fast(result, product, curve); } /* Computes result = left^2 % curve_prime. */ static void vli_mod_square_fast(u64 *result, const u64 *left, - const u64 *curve_prime, unsigned int ndigits) + const struct ecc_curve *curve) { u64 product[2 * ECC_MAX_DIGITS]; - vli_square(product, left, ndigits); - vli_mmod_fast(result, product, curve_prime, ndigits); + vli_square(product, left, curve->g.ndigits); + vli_mmod_fast(result, product, curve); } #define EVEN(vli) (!(vli[0] & 1)) @@ -945,25 +1074,27 @@ static bool ecc_point_is_zero(const struct ecc_point *point) /* Double in place */ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, - u64 *curve_prime, unsigned int ndigits) + const struct ecc_curve *curve) { /* t1 = x, t2 = y, t3 = z */ u64 t4[ECC_MAX_DIGITS]; u64 t5[ECC_MAX_DIGITS]; + const u64 *curve_prime = curve->p; + const unsigned int ndigits = curve->g.ndigits; if (vli_is_zero(z1, ndigits)) return; /* t4 = y1^2 */ - vli_mod_square_fast(t4, y1, curve_prime, ndigits); + vli_mod_square_fast(t4, y1, curve); /* t5 = x1*y1^2 = A */ - vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits); + vli_mod_mult_fast(t5, x1, t4, curve); /* t4 = y1^4 */ - vli_mod_square_fast(t4, t4, curve_prime, ndigits); + vli_mod_square_fast(t4, t4, curve); /* t2 = y1*z1 = z3 */ - vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits); + vli_mod_mult_fast(y1, y1, z1, curve); /* t3 = z1^2 */ - vli_mod_square_fast(z1, z1, curve_prime, ndigits); + vli_mod_square_fast(z1, z1, curve); /* t1 = x1 + z1^2 */ vli_mod_add(x1, x1, z1, curve_prime, ndigits); @@ -972,7 +1103,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, /* t3 = x1 - z1^2 */ vli_mod_sub(z1, x1, z1, curve_prime, ndigits); /* t1 = x1^2 - z1^4 */ - vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits); + vli_mod_mult_fast(x1, x1, z1, curve); /* t3 = 2*(x1^2 - z1^4) */ vli_mod_add(z1, x1, x1, 
curve_prime, ndigits); @@ -989,7 +1120,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, /* t1 = 3/2*(x1^2 - z1^4) = B */ /* t3 = B^2 */ - vli_mod_square_fast(z1, x1, curve_prime, ndigits); + vli_mod_square_fast(z1, x1, curve); /* t3 = B^2 - A */ vli_mod_sub(z1, z1, t5, curve_prime, ndigits); /* t3 = B^2 - 2A = x3 */ @@ -997,7 +1128,7 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, /* t5 = A - x3 */ vli_mod_sub(t5, t5, z1, curve_prime, ndigits); /* t1 = B * (A - x3) */ - vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + vli_mod_mult_fast(x1, x1, t5, curve); /* t4 = B * (A - x3) - y1^4 = y3 */ vli_mod_sub(t4, x1, t4, curve_prime, ndigits); @@ -1007,23 +1138,22 @@ static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1, } /* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */ -static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime, - unsigned int ndigits) +static void apply_z(u64 *x1, u64 *y1, u64 *z, const struct ecc_curve *curve) { u64 t1[ECC_MAX_DIGITS]; - vli_mod_square_fast(t1, z, curve_prime, ndigits); /* z^2 */ - vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */ - vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits); /* z^3 */ - vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */ + vli_mod_square_fast(t1, z, curve); /* z^2 */ + vli_mod_mult_fast(x1, x1, t1, curve); /* x1 * z^2 */ + vli_mod_mult_fast(t1, t1, z, curve); /* z^3 */ + vli_mod_mult_fast(y1, y1, t1, curve); /* y1 * z^3 */ } /* P = (x1, y1) => 2P, (x2, y2) => P' */ static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2, - u64 *p_initial_z, u64 *curve_prime, - unsigned int ndigits) + u64 *p_initial_z, const struct ecc_curve *curve) { u64 z[ECC_MAX_DIGITS]; + const unsigned int ndigits = curve->g.ndigits; vli_set(x2, x1, ndigits); vli_set(y2, y1, ndigits); @@ -1034,35 +1164,37 @@ static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2, if (p_initial_z) vli_set(z, p_initial_z, ndigits); - apply_z(x1, y1, z, curve_prime, ndigits); + apply_z(x1, y1, z, curve); - ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits); + ecc_point_double_jacobian(x1, y1, z, curve); - apply_z(x2, y2, z, curve_prime, ndigits); + apply_z(x2, y2, z, curve); } /* Input P = (x1, y1, Z), Q = (x2, y2, Z) * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3) * or P => P', Q => P + Q */ -static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, - unsigned int ndigits) +static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, + const struct ecc_curve *curve) { /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ u64 t5[ECC_MAX_DIGITS]; + const u64 *curve_prime = curve->p; + const unsigned int ndigits = curve->g.ndigits; /* t5 = x2 - x1 */ vli_mod_sub(t5, x2, x1, curve_prime, ndigits); /* t5 = (x2 - x1)^2 = A */ - vli_mod_square_fast(t5, t5, curve_prime, ndigits); + vli_mod_square_fast(t5, t5, curve); /* t1 = x1*A = B */ - vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + vli_mod_mult_fast(x1, x1, t5, curve); /* t3 = x2*A = C */ - vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits); + vli_mod_mult_fast(x2, x2, t5, curve); /* t4 = y2 - y1 */ vli_mod_sub(y2, y2, y1, curve_prime, ndigits); /* t5 = (y2 - y1)^2 = D */ - vli_mod_square_fast(t5, y2, curve_prime, ndigits); + vli_mod_square_fast(t5, y2, curve); /* t5 = D - B */ vli_mod_sub(t5, t5, x1, curve_prime, ndigits); @@ -1071,11 +1203,11 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, /* t3 = C - B */ vli_mod_sub(x2, x2, x1, curve_prime, ndigits); /* t2 = y1*(C - B) 
*/ - vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits); + vli_mod_mult_fast(y1, y1, x2, curve); /* t3 = B - x3 */ vli_mod_sub(x2, x1, t5, curve_prime, ndigits); /* t4 = (y2 - y1)*(B - x3) */ - vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits); + vli_mod_mult_fast(y2, y2, x2, curve); /* t4 = y3 */ vli_mod_sub(y2, y2, y1, curve_prime, ndigits); @@ -1086,22 +1218,24 @@ static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3) * or P => P - Q, Q => P + Q */ -static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, - unsigned int ndigits) +static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, + const struct ecc_curve *curve) { /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */ u64 t5[ECC_MAX_DIGITS]; u64 t6[ECC_MAX_DIGITS]; u64 t7[ECC_MAX_DIGITS]; + const u64 *curve_prime = curve->p; + const unsigned int ndigits = curve->g.ndigits; /* t5 = x2 - x1 */ vli_mod_sub(t5, x2, x1, curve_prime, ndigits); /* t5 = (x2 - x1)^2 = A */ - vli_mod_square_fast(t5, t5, curve_prime, ndigits); + vli_mod_square_fast(t5, t5, curve); /* t1 = x1*A = B */ - vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits); + vli_mod_mult_fast(x1, x1, t5, curve); /* t3 = x2*A = C */ - vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits); + vli_mod_mult_fast(x2, x2, t5, curve); /* t4 = y2 + y1 */ vli_mod_add(t5, y2, y1, curve_prime, ndigits); /* t4 = y2 - y1 */ @@ -1110,29 +1244,29 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime, /* t6 = C - B */ vli_mod_sub(t6, x2, x1, curve_prime, ndigits); /* t2 = y1 * (C - B) */ - vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits); + vli_mod_mult_fast(y1, y1, t6, curve); /* t6 = B + C */ vli_mod_add(t6, x1, x2, curve_prime, ndigits); /* t3 = (y2 - y1)^2 */ - vli_mod_square_fast(x2, y2, curve_prime, ndigits); + vli_mod_square_fast(x2, y2, curve); /* t3 = x3 */ vli_mod_sub(x2, x2, t6, curve_prime, ndigits); /* t7 = B - x3 */ vli_mod_sub(t7, x1, x2, curve_prime, ndigits); /* t4 = (y2 - y1)*(B - x3) */ - vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits); + vli_mod_mult_fast(y2, y2, t7, curve); /* t4 = y3 */ vli_mod_sub(y2, y2, y1, curve_prime, ndigits); /* t7 = (y2 + y1)^2 = F */ - vli_mod_square_fast(t7, t5, curve_prime, ndigits); + vli_mod_square_fast(t7, t5, curve); /* t7 = x3' */ vli_mod_sub(t7, t7, t6, curve_prime, ndigits); /* t6 = x3' - B */ vli_mod_sub(t6, t7, x1, curve_prime, ndigits); /* t6 = (y2 + y1)*(x3' - B) */ - vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits); + vli_mod_mult_fast(t6, t6, t5, curve); /* t2 = y3' */ vli_mod_sub(y1, t6, y1, curve_prime, ndigits); @@ -1162,41 +1296,37 @@ static void ecc_point_mult(struct ecc_point *result, vli_set(rx[1], point->x, ndigits); vli_set(ry[1], point->y, ndigits); - xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime, - ndigits); + xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve); for (i = num_bits - 2; i > 0; i--) { nb = !vli_test_bit(scalar, i); - xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime, - ndigits); - xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, - ndigits); + xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve); + xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve); } nb = !vli_test_bit(scalar, 0); - xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime, - ndigits); + xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve); /* Find final 1/Z value. 
*/ /* X1 - X0 */ vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits); /* Yb * (X1 - X0) */ - vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits); + vli_mod_mult_fast(z, z, ry[1 - nb], curve); /* xP * Yb * (X1 - X0) */ - vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits); + vli_mod_mult_fast(z, z, point->x, curve); /* 1 / (xP * Yb * (X1 - X0)) */ vli_mod_inv(z, z, curve_prime, point->ndigits); /* yP / (xP * Yb * (X1 - X0)) */ - vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits); + vli_mod_mult_fast(z, z, point->y, curve); /* Xb * yP / (xP * Yb * (X1 - X0)) */ - vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits); + vli_mod_mult_fast(z, z, rx[1 - nb], curve); /* End 1/Z calculation */ - xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits); + xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve); - apply_z(rx[0], ry[0], z, curve_prime, ndigits); + apply_z(rx[0], ry[0], z, curve); vli_set(result->x, rx[0], ndigits); vli_set(result->y, ry[0], ndigits); @@ -1217,9 +1347,9 @@ static void ecc_point_add(const struct ecc_point *result, vli_mod_sub(z, result->x, p->x, curve->p, ndigits); vli_set(px, p->x, ndigits); vli_set(py, p->y, ndigits); - xycz_add(px, py, result->x, result->y, curve->p, ndigits); + xycz_add(px, py, result->x, result->y, curve); vli_mod_inv(z, z, curve->p, ndigits); - apply_z(result->x, result->y, z, curve->p, ndigits); + apply_z(result->x, result->y, z, curve); } /* Computes R = u1P + u2Q mod p using Shamir's trick. @@ -1248,8 +1378,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result, points[2] = q; points[3] = ∑ - num_bits = max(vli_num_bits(u1, ndigits), - vli_num_bits(u2, ndigits)); + num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits)); i = num_bits - 1; idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1); point = points[idx]; @@ -1260,7 +1389,7 @@ void ecc_point_mult_shamir(const struct ecc_point *result, z[0] = 1; for (--i; i >= 0; i--) { - ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits); + ecc_point_double_jacobian(rx, ry, z, curve); idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1); point = points[idx]; if (point) { @@ -1270,27 +1399,17 @@ void ecc_point_mult_shamir(const struct ecc_point *result, vli_set(tx, point->x, ndigits); vli_set(ty, point->y, ndigits); - apply_z(tx, ty, z, curve->p, ndigits); + apply_z(tx, ty, z, curve); vli_mod_sub(tz, rx, tx, curve->p, ndigits); - xycz_add(tx, ty, rx, ry, curve->p, ndigits); - vli_mod_mult_fast(z, z, tz, curve->p, ndigits); + xycz_add(tx, ty, rx, ry, curve); + vli_mod_mult_fast(z, z, tz, curve); } } vli_mod_inv(z, z, curve->p, ndigits); - apply_z(rx, ry, z, curve->p, ndigits); + apply_z(rx, ry, z, curve); } EXPORT_SYMBOL(ecc_point_mult_shamir); -static inline void ecc_swap_digits(const u64 *in, u64 *out, - unsigned int ndigits) -{ - const __be64 *src = (__force __be64 *)in; - int i; - - for (i = 0; i < ndigits; i++) - out[i] = be64_to_cpu(src[ndigits - 1 - i]); -} - static int __ecc_is_key_valid(const struct ecc_curve *curve, const u64 *private_key, unsigned int ndigits) { @@ -1441,10 +1560,10 @@ int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve, return -EINVAL; /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */ - vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */ - vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */ - vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */ - vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */ + vli_mod_square_fast(yy, 
pk->y, curve); /* y^2 */ + vli_mod_square_fast(xxx, pk->x, curve); /* x^2 */ + vli_mod_mult_fast(xxx, xxx, pk->x, curve); /* x^3 */ + vli_mod_mult_fast(w, curve->a, pk->x, curve); /* a·x */ vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */ vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */ if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */ diff --git a/crypto/ecc.h b/crypto/ecc.h index d4e546b9ad79..a006132646a4 100644 --- a/crypto/ecc.h +++ b/crypto/ecc.h @@ -26,49 +26,34 @@ #ifndef _CRYPTO_ECC_H #define _CRYPTO_ECC_H +#include + /* One digit is u64 qword. */ #define ECC_CURVE_NIST_P192_DIGITS 3 #define ECC_CURVE_NIST_P256_DIGITS 4 -#define ECC_MAX_DIGITS (512 / 64) +#define ECC_CURVE_NIST_P384_DIGITS 6 +#define ECC_MAX_DIGITS (512 / 64) /* due to ecrdsa */ #define ECC_DIGITS_TO_BYTES_SHIFT 3 -/** - * struct ecc_point - elliptic curve point in affine coordinates - * - * @x: X coordinate in vli form. - * @y: Y coordinate in vli form. - * @ndigits: Length of vlis in u64 qwords. - */ -struct ecc_point { - u64 *x; - u64 *y; - u8 ndigits; -}; +#define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT) #define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits } /** - * struct ecc_curve - definition of elliptic curve - * - * @name: Short name of the curve. - * @g: Generator point of the curve. - * @p: Prime number, if Barrett's reduction is used for this curve - * pre-calculated value 'mu' is appended to the @p after ndigits. - * Use of Barrett's reduction is heuristically determined in - * vli_mmod_fast(). - * @n: Order of the curve group. - * @a: Curve parameter a. - * @b: Curve parameter b. + * ecc_swap_digits() - Copy ndigits from big endian array to native array + * @in: Input array + * @out: Output array + * @ndigits: Number of digits to copy */ -struct ecc_curve { - char *name; - struct ecc_point g; - u64 *p; - u64 *n; - u64 *a; - u64 *b; -}; +static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits) +{ + const __be64 *src = (__force __be64 *)in; + int i; + + for (i = 0; i < ndigits; i++) + out[i] = be64_to_cpu(src[ndigits - 1 - i]); +} /** * ecc_is_key_valid() - Validate a given ECDH private key diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h index 69be6c7d228f..9719934c9428 100644 --- a/crypto/ecc_curve_defs.h +++ b/crypto/ecc_curve_defs.h @@ -54,4 +54,53 @@ static struct ecc_curve nist_p256 = { .b = nist_p256_b }; +/* NIST P-384 */ +static u64 nist_p384_g_x[] = { 0x3A545E3872760AB7ull, 0x5502F25DBF55296Cull, + 0x59F741E082542A38ull, 0x6E1D3B628BA79B98ull, + 0x8Eb1C71EF320AD74ull, 0xAA87CA22BE8B0537ull }; +static u64 nist_p384_g_y[] = { 0x7A431D7C90EA0E5Full, 0x0A60B1CE1D7E819Dull, + 0xE9DA3113B5F0B8C0ull, 0xF8F41DBD289A147Cull, + 0x5D9E98BF9292DC29ull, 0x3617DE4A96262C6Full }; +static u64 nist_p384_p[] = { 0x00000000FFFFFFFFull, 0xFFFFFFFF00000000ull, + 0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull, + 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull }; +static u64 nist_p384_n[] = { 0xECEC196ACCC52973ull, 0x581A0DB248B0A77Aull, + 0xC7634D81F4372DDFull, 0xFFFFFFFFFFFFFFFFull, + 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull }; +static u64 nist_p384_a[] = { 0x00000000FFFFFFFCull, 0xFFFFFFFF00000000ull, + 0xFFFFFFFFFFFFFFFEull, 0xFFFFFFFFFFFFFFFFull, + 0xFFFFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull }; +static u64 nist_p384_b[] = { 0x2a85c8edd3ec2aefull, 0xc656398d8a2ed19dull, + 0x0314088f5013875aull, 0x181d9c6efe814112ull, + 0x988e056be3f82d19ull, 0xb3312fa7e23ee7e4ull }; +static struct ecc_curve 
nist_p384 = { + .name = "nist_384", + .g = { + .x = nist_p384_g_x, + .y = nist_p384_g_y, + .ndigits = 6, + }, + .p = nist_p384_p, + .n = nist_p384_n, + .a = nist_p384_a, + .b = nist_p384_b +}; + +/* curve25519 */ +static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000 }; +static u64 curve25519_p[] = { 0xffffffffffffffed, 0xffffffffffffffff, + 0xffffffffffffffff, 0x7fffffffffffffff }; +static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000 }; +static const struct ecc_curve ecc_25519 = { + .name = "curve25519", + .g = { + .x = curve25519_g_x, + .ndigits = 4, + }, + .p = curve25519_p, + .a = curve25519_a, +}; + #endif diff --git a/crypto/ecdh.c b/crypto/ecdh.c index 96f80c8f8e30..04a427b8c956 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -23,33 +23,16 @@ static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm) return kpp_tfm_ctx(tfm); } -static unsigned int ecdh_supported_curve(unsigned int curve_id) -{ - switch (curve_id) { - case ECC_CURVE_NIST_P192: return ECC_CURVE_NIST_P192_DIGITS; - case ECC_CURVE_NIST_P256: return ECC_CURVE_NIST_P256_DIGITS; - default: return 0; - } -} - static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); struct ecdh params; - unsigned int ndigits; if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0 || - params.key_size > sizeof(ctx->private_key)) + params.key_size > sizeof(u64) * ctx->ndigits) return -EINVAL; - ndigits = ecdh_supported_curve(params.curve_id); - if (!ndigits) - return -EINVAL; - - ctx->curve_id = params.curve_id; - ctx->ndigits = ndigits; - if (!params.key || !params.key_size) return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, ctx->private_key); @@ -140,13 +123,24 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm) return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1); } -static struct kpp_alg ecdh = { +static int ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) +{ + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P192; + ctx->ndigits = ECC_CURVE_NIST_P192_DIGITS; + + return 0; +} + +static struct kpp_alg ecdh_nist_p192 = { .set_secret = ecdh_set_secret, .generate_public_key = ecdh_compute_value, .compute_shared_secret = ecdh_compute_value, .max_size = ecdh_max_size, + .init = ecdh_nist_p192_init_tfm, .base = { - .cra_name = "ecdh", + .cra_name = "ecdh-nist-p192", .cra_driver_name = "ecdh-generic", .cra_priority = 100, .cra_module = THIS_MODULE, @@ -154,14 +148,48 @@ static struct kpp_alg ecdh = { }, }; +static int ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) +{ + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P256; + ctx->ndigits = ECC_CURVE_NIST_P256_DIGITS; + + return 0; +} + +static struct kpp_alg ecdh_nist_p256 = { + .set_secret = ecdh_set_secret, + .generate_public_key = ecdh_compute_value, + .compute_shared_secret = ecdh_compute_value, + .max_size = ecdh_max_size, + .init = ecdh_nist_p256_init_tfm, + .base = { + .cra_name = "ecdh-nist-p256", + .cra_driver_name = "ecdh-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecdh_ctx), + }, +}; + +static bool ecdh_nist_p192_registered; + static int ecdh_init(void) { - return crypto_register_kpp(&ecdh); + int ret; + + ret = crypto_register_kpp(&ecdh_nist_p192); + ecdh_nist_p192_registered = ret == 0; + + return crypto_register_kpp(&ecdh_nist_p256); } static void ecdh_exit(void) { - 
crypto_unregister_kpp(&ecdh); + if (ecdh_nist_p192_registered) + crypto_unregister_kpp(&ecdh_nist_p192); + crypto_unregister_kpp(&ecdh_nist_p256); } subsys_initcall(ecdh_init); diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c index fca63b559f65..f18f9028f912 100644 --- a/crypto/ecdh_helper.c +++ b/crypto/ecdh_helper.c @@ -10,7 +10,7 @@ #include #include -#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short)) +#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + sizeof(short)) static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz) { @@ -46,7 +46,6 @@ int crypto_ecdh_encode_key(char *buf, unsigned int len, return -EINVAL; ptr = ecdh_pack_data(ptr, &secret, sizeof(secret)); - ptr = ecdh_pack_data(ptr, ¶ms->curve_id, sizeof(params->curve_id)); ptr = ecdh_pack_data(ptr, ¶ms->key_size, sizeof(params->key_size)); ecdh_pack_data(ptr, params->key, params->key_size); @@ -70,7 +69,6 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len, if (unlikely(len < secret.len)) return -EINVAL; - ptr = ecdh_unpack_data(¶ms->curve_id, ptr, sizeof(params->curve_id)); ptr = ecdh_unpack_data(¶ms->key_size, ptr, sizeof(params->key_size)); if (secret.len != crypto_ecdh_key_len(params)) return -EINVAL; diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c new file mode 100644 index 000000000000..1e7b15009bf6 --- /dev/null +++ b/crypto/ecdsa.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2021 IBM Corporation + */ + +#include +#include +#include +#include +#include +#include + +#include "ecc.h" +#include "ecdsasignature.asn1.h" + +struct ecc_ctx { + unsigned int curve_id; + const struct ecc_curve *curve; + + bool pub_key_set; + u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */ + u64 y[ECC_MAX_DIGITS]; + struct ecc_point pub_key; +}; + +struct ecdsa_signature_ctx { + const struct ecc_curve *curve; + u64 r[ECC_MAX_DIGITS]; + u64 s[ECC_MAX_DIGITS]; +}; + +/* + * Get the r and s components of a signature from the X509 certificate. 
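+ *
+ * In DER, r and s are variable-length big-endian INTEGERs: a value whose
+ * most significant bit is set gains one leading zero octet, and values
+ * shorter than the key size arrive with their leading zeros stripped.
+ * Both cases are normalized below to an ndigits-long native u64 digit
+ * array (least significant digit first).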
+ */
+static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
+				  const void *value, size_t vlen, unsigned int ndigits)
+{
+	size_t keylen = ndigits * sizeof(u64);
+	ssize_t diff = vlen - keylen;
+	const char *d = value;
+	u8 rs[ECC_MAX_BYTES];
+
+	if (!value || !vlen)
+		return -EINVAL;
+
+	/* diff = 0: 'value' has exactly the right size
+	 * diff > 0: 'value' has too many bytes; one leading zero is allowed that
+	 *           makes the value a positive integer; error on more
+	 * diff < 0: 'value' is missing leading zeros, which we add
+	 */
+	if (diff > 0) {
+		/* skip over leading zeros that make 'value' a positive int */
+		if (*d == 0) {
+			vlen -= 1;
+			diff--;
+			d++;
+		}
+		if (diff)
+			return -EINVAL;
+	}
+	if (-diff >= keylen)
+		return -EINVAL;
+
+	if (diff) {
+		/* leading zeros not given in 'value' */
+		memset(rs, 0, -diff);
+	}
+
+	memcpy(&rs[-diff], d, vlen);
+
+	ecc_swap_digits((u64 *)rs, dest, ndigits);
+
+	return 0;
+}
+
+int ecdsa_get_signature_r(void *context, size_t hdrlen, unsigned char tag,
+			  const void *value, size_t vlen)
+{
+	struct ecdsa_signature_ctx *sig = context;
+
+	return ecdsa_get_signature_rs(sig->r, hdrlen, tag, value, vlen,
+				      sig->curve->g.ndigits);
+}
+
+int ecdsa_get_signature_s(void *context, size_t hdrlen, unsigned char tag,
+			  const void *value, size_t vlen)
+{
+	struct ecdsa_signature_ctx *sig = context;
+
+	return ecdsa_get_signature_rs(sig->s, hdrlen, tag, value, vlen,
+				      sig->curve->g.ndigits);
+}
+
+static int _ecdsa_verify(struct ecc_ctx *ctx, const u64 *hash, const u64 *r, const u64 *s)
+{
+	const struct ecc_curve *curve = ctx->curve;
+	unsigned int ndigits = curve->g.ndigits;
+	u64 s1[ECC_MAX_DIGITS];
+	u64 u1[ECC_MAX_DIGITS];
+	u64 u2[ECC_MAX_DIGITS];
+	u64 x1[ECC_MAX_DIGITS];
+	u64 y1[ECC_MAX_DIGITS];
+	struct ecc_point res = ECC_POINT_INIT(x1, y1, ndigits);
+
+	/* 0 < r < n and 0 < s < n */
+	if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 ||
+	    vli_is_zero(s, ndigits) || vli_cmp(s, curve->n, ndigits) >= 0)
+		return -EBADMSG;
+
+	/* hash is given */
+	pr_devel("hash : %016llx %016llx ... %016llx\n",
+		 hash[ndigits - 1], hash[ndigits - 2], hash[0]);
+
+	/* s1 = (s^-1) mod n */
+	vli_mod_inv(s1, s, curve->n, ndigits);
+	/* u1 = (hash * s1) mod n */
+	vli_mod_mult_slow(u1, hash, s1, curve->n, ndigits);
+	/* u2 = (r * s1) mod n */
+	vli_mod_mult_slow(u2, r, s1, curve->n, ndigits);
+	/* res = u1*G + u2 * pub_key */
+	ecc_point_mult_shamir(&res, u1, &curve->g, u2, &ctx->pub_key, curve);
+
+	/* res.x = res.x mod n (if res.x > order) */
+	if (unlikely(vli_cmp(res.x, curve->n, ndigits) == 1))
+		/* faster alternative for NIST p384, p256 & p192 */
+		vli_sub(res.x, res.x, curve->n, ndigits);
+
+	if (!vli_cmp(res.x, r, ndigits))
+		return 0;
+
+	return -EKEYREJECTED;
+}
+
+/*
+ * Verify an ECDSA signature.
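+ *
+ * This is the standard verification equation: with e the (possibly
+ * truncated) message hash, compute u1 = e * s^-1 mod n and
+ * u2 = r * s^-1 mod n, then accept the signature iff the x coordinate
+ * of u1 * G + u2 * pub_key is congruent to r modulo n.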
+ */
+static int ecdsa_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+	size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
+	struct ecdsa_signature_ctx sig_ctx = {
+		.curve = ctx->curve,
+	};
+	u8 rawhash[ECC_MAX_BYTES];
+	u64 hash[ECC_MAX_DIGITS];
+	unsigned char *buffer;
+	ssize_t diff;
+	int ret;
+
+	if (unlikely(!ctx->pub_key_set))
+		return -EINVAL;
+
+	buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	sg_pcopy_to_buffer(req->src,
+			   sg_nents_for_len(req->src, req->src_len + req->dst_len),
+			   buffer, req->src_len + req->dst_len, 0);
+
+	ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx,
+			       buffer, req->src_len);
+	if (ret < 0)
+		goto error;
+
+	/* if the hash is shorter, then we will add leading zeros to fit to ndigits */
+	diff = keylen - req->dst_len;
+	if (diff >= 0) {
+		if (diff)
+			memset(rawhash, 0, diff);
+		memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
+	} else if (diff < 0) {
+		/* given hash is longer, we take the left-most bytes */
+		memcpy(&rawhash, buffer + req->src_len, keylen);
+	}
+
+	ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
+
+	ret = _ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
+
+error:
+	kfree(buffer);
+
+	return ret;
+}
+
+static int ecdsa_ecc_ctx_init(struct ecc_ctx *ctx, unsigned int curve_id)
+{
+	ctx->curve_id = curve_id;
+	ctx->curve = ecc_get_curve(curve_id);
+	if (!ctx->curve)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void ecdsa_ecc_ctx_deinit(struct ecc_ctx *ctx)
+{
+	ctx->pub_key_set = false;
+}
+
+static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
+{
+	unsigned int curve_id = ctx->curve_id;
+	int ret;
+
+	ecdsa_ecc_ctx_deinit(ctx);
+	ret = ecdsa_ecc_ctx_init(ctx, curve_id);
+	if (ret == 0)
+		ctx->pub_key = ECC_POINT_INIT(ctx->x, ctx->y,
+					      ctx->curve->g.ndigits);
+	return ret;
+}
+
+/*
+ * Set the public key given the raw uncompressed key data from an X509
+ * certificate. The key data contain the concatenated X and Y coordinates of
+ * the public key.
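+ *
+ * The expected layout is a single 0x04 octet (the marker for an
+ * uncompressed point) followed by X and Y, each ndigits * 8 bytes long
+ * and in big-endian byte order.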
+ */ +static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + const unsigned char *d = key; + const u64 *digits = (const u64 *)&d[1]; + unsigned int ndigits; + int ret; + + ret = ecdsa_ecc_ctx_reset(ctx); + if (ret < 0) + return ret; + + if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0) + return -EINVAL; + /* we only accept uncompressed format indicated by '4' */ + if (d[0] != 4) + return -EINVAL; + + keylen--; + ndigits = (keylen >> 1) / sizeof(u64); + if (ndigits != ctx->curve->g.ndigits) + return -EINVAL; + + ecc_swap_digits(digits, ctx->pub_key.x, ndigits); + ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits); + ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key); + + ctx->pub_key_set = ret == 0; + + return ret; +} + +static void ecdsa_exit_tfm(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + ecdsa_ecc_ctx_deinit(ctx); +} + +static unsigned int ecdsa_max_size(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT; +} + +static int ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384); +} + +static struct akcipher_alg ecdsa_nist_p384 = { + .verify = ecdsa_verify, + .set_pub_key = ecdsa_set_pub_key, + .max_size = ecdsa_max_size, + .init = ecdsa_nist_p384_init_tfm, + .exit = ecdsa_exit_tfm, + .base = { + .cra_name = "ecdsa-nist-p384", + .cra_driver_name = "ecdsa-nist-p384-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecc_ctx), + }, +}; + +static int ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256); +} + +static struct akcipher_alg ecdsa_nist_p256 = { + .verify = ecdsa_verify, + .set_pub_key = ecdsa_set_pub_key, + .max_size = ecdsa_max_size, + .init = ecdsa_nist_p256_init_tfm, + .exit = ecdsa_exit_tfm, + .base = { + .cra_name = "ecdsa-nist-p256", + .cra_driver_name = "ecdsa-nist-p256-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecc_ctx), + }, +}; + +static int ecdsa_nist_p192_init_tfm(struct crypto_akcipher *tfm) +{ + struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm); + + return ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P192); +} + +static struct akcipher_alg ecdsa_nist_p192 = { + .verify = ecdsa_verify, + .set_pub_key = ecdsa_set_pub_key, + .max_size = ecdsa_max_size, + .init = ecdsa_nist_p192_init_tfm, + .exit = ecdsa_exit_tfm, + .base = { + .cra_name = "ecdsa-nist-p192", + .cra_driver_name = "ecdsa-nist-p192-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecc_ctx), + }, +}; +static bool ecdsa_nist_p192_registered; + +static int ecdsa_init(void) +{ + int ret; + + /* NIST p192 may not be available in FIPS mode */ + ret = crypto_register_akcipher(&ecdsa_nist_p192); + ecdsa_nist_p192_registered = ret == 0; + + ret = crypto_register_akcipher(&ecdsa_nist_p256); + if (ret) + goto nist_p256_error; + + ret = crypto_register_akcipher(&ecdsa_nist_p384); + if (ret) + goto nist_p384_error; + + return 0; + +nist_p384_error: + crypto_unregister_akcipher(&ecdsa_nist_p256); + +nist_p256_error: + if (ecdsa_nist_p192_registered) + crypto_unregister_akcipher(&ecdsa_nist_p192); + return ret; +} + +static void ecdsa_exit(void) +{ + if 
(ecdsa_nist_p192_registered) + crypto_unregister_akcipher(&ecdsa_nist_p192); + crypto_unregister_akcipher(&ecdsa_nist_p256); + crypto_unregister_akcipher(&ecdsa_nist_p384); +} + +subsys_initcall(ecdsa_init); +module_exit(ecdsa_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Stefan Berger "); +MODULE_DESCRIPTION("ECDSA generic algorithm"); +MODULE_ALIAS_CRYPTO("ecdsa-generic"); diff --git a/crypto/ecdsasignature.asn1 b/crypto/ecdsasignature.asn1 new file mode 100644 index 000000000000..621ab754fb9f --- /dev/null +++ b/crypto/ecdsasignature.asn1 @@ -0,0 +1,4 @@ +ECDSASignature ::= SEQUENCE { + r INTEGER ({ ecdsa_get_signature_r }), + s INTEGER ({ ecdsa_get_signature_s }) +} diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c index c36ea0c8be98..76a04d000c0d 100644 --- a/crypto/fcrypt.c +++ b/crypto/fcrypt.c @@ -63,10 +63,7 @@ do { \ } while (0) /* Rotate right one 64 bit number as a 56 bit number */ -#define ror56_64(k, n) \ -do { \ - k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \ -} while (0) +#define ror56_64(k, n) (k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n))) /* * Sboxes for Feistel network derived from diff --git a/crypto/jitterentropy.c b/crypto/jitterentropy.c index 6e147c43fc18..a11b3208760f 100644 --- a/crypto/jitterentropy.c +++ b/crypto/jitterentropy.c @@ -597,7 +597,7 @@ int jent_read_entropy(struct rand_data *ec, unsigned char *data, if (!ec) return -1; - while (0 < len) { + while (len > 0) { unsigned int tocopy; jent_gen_entropy(ec); @@ -678,7 +678,7 @@ struct rand_data *jent_entropy_collector_alloc(unsigned int osr, } /* verify and set the oversampling rate */ - if (0 == osr) + if (osr == 0) osr = 1; /* minimum sampling rate is 1 */ entropy_collector->osr = osr; @@ -769,7 +769,7 @@ int jent_entropy_init(void) * etc. with the goal to clear it to get the worst case * measurements. */ - if (CLEARCACHE > i) + if (i < CLEARCACHE) continue; if (stuck) @@ -826,7 +826,7 @@ int jent_entropy_init(void) * should not fail. The value of 3 should cover the NTP case being * performed during our test run. 
*/ - if (3 < time_backwards) + if (time_backwards > 3) return JENT_ENOMONOTONIC; /* diff --git a/crypto/keywrap.c b/crypto/keywrap.c index 3517773bc7f7..054d9a216fc9 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c @@ -114,9 +114,9 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk, scatterwalk_start(walk, sg); scatterwalk_advance(walk, skip); break; - } else - skip -= sg->length; + } + skip -= sg->length; sg = sg_next(sg); } } diff --git a/crypto/rng.c b/crypto/rng.c index a888d84b524a..fea082b25fe4 100644 --- a/crypto/rng.c +++ b/crypto/rng.c @@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) u8 *buf = NULL; int err; - crypto_stats_get(alg); if (!seed && slen) { buf = kmalloc(slen, GFP_KERNEL); - if (!buf) { - crypto_alg_put(alg); + if (!buf) return -ENOMEM; - } err = get_random_bytes_wait(buf, slen); - if (err) { - crypto_alg_put(alg); + if (err) goto out; - } seed = buf; } + crypto_stats_get(alg); err = crypto_rng_alg(tfm)->seed(tfm, seed, slen); crypto_stats_rng_seed(alg, err); out: diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 236c87547a17..45f98b750053 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -272,6 +272,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, u32 *k = ctx->expkey; u8 *k8 = (u8 *)k; u32 r0, r1, r2, r3, r4; + __le32 *lk; int i; /* Copy key, add padding */ @@ -283,22 +284,32 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, while (i < SERPENT_MAX_KEY_SIZE) k8[i++] = 0; + lk = (__le32 *)k; + k[0] = le32_to_cpu(lk[0]); + k[1] = le32_to_cpu(lk[1]); + k[2] = le32_to_cpu(lk[2]); + k[3] = le32_to_cpu(lk[3]); + k[4] = le32_to_cpu(lk[4]); + k[5] = le32_to_cpu(lk[5]); + k[6] = le32_to_cpu(lk[6]); + k[7] = le32_to_cpu(lk[7]); + /* Expand key using polynomial */ - r0 = le32_to_cpu(k[3]); - r1 = le32_to_cpu(k[4]); - r2 = le32_to_cpu(k[5]); - r3 = le32_to_cpu(k[6]); - r4 = le32_to_cpu(k[7]); + r0 = k[3]; + r1 = k[4]; + r2 = k[5]; + r3 = k[6]; + r4 = k[7]; - keyiter(le32_to_cpu(k[0]), r0, r4, r2, 0, 0); - keyiter(le32_to_cpu(k[1]), r1, r0, r3, 1, 1); - keyiter(le32_to_cpu(k[2]), r2, r1, r4, 2, 2); - keyiter(le32_to_cpu(k[3]), r3, r2, r0, 3, 3); - keyiter(le32_to_cpu(k[4]), r4, r3, r1, 4, 4); - keyiter(le32_to_cpu(k[5]), r0, r4, r2, 5, 5); - keyiter(le32_to_cpu(k[6]), r1, r0, r3, 6, 6); - keyiter(le32_to_cpu(k[7]), r2, r1, r4, 7, 7); + keyiter(k[0], r0, r4, r2, 0, 0); + keyiter(k[1], r1, r0, r3, 1, 1); + keyiter(k[2], r2, r1, r4, 2, 2); + keyiter(k[3], r3, r2, r0, 3, 3); + keyiter(k[4], r4, r3, r1, 4, 4); + keyiter(k[5], r0, r4, r2, 5, 5); + keyiter(k[6], r1, r0, r3, 6, 6); + keyiter(k[7], r2, r1, r4, 7, 7); keyiter(k[0], r3, r2, r0, 8, 8); keyiter(k[1], r4, r3, r1, 9, 9); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 93359999c94b..10c5b3b01ec4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1168,11 +1168,6 @@ static inline int check_shash_op(const char *op, int err, return err; } -static inline const void *sg_data(struct scatterlist *sg) -{ - return page_address(sg_page(sg)) + sg->offset; -} - /* Test one hash test vector in one configuration, using the shash API */ static int test_shash_vec_cfg(const struct hash_testvec *vec, const char *vec_name, @@ -1230,7 +1225,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, return 0; if (cfg->nosimd) crypto_disable_simd_for_test(); - err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]), + err = crypto_shash_digest(desc, sg_virt(&tsgl->sgl[0]), 
tsgl->sgl[0].length, result); if (cfg->nosimd) crypto_reenable_simd_for_test(); @@ -1266,7 +1261,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, cfg->finalization_type == FINALIZATION_TYPE_FINUP) { if (divs[i]->nosimd) crypto_disable_simd_for_test(); - err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]), + err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]), tsgl->sgl[i].length, result); if (divs[i]->nosimd) crypto_reenable_simd_for_test(); @@ -1278,7 +1273,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, } if (divs[i]->nosimd) crypto_disable_simd_for_test(); - err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]), + err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]), tsgl->sgl[i].length); if (divs[i]->nosimd) crypto_reenable_simd_for_test(); @@ -4904,11 +4899,38 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { #endif - .alg = "ecdh", +#ifndef CONFIG_CRYPTO_FIPS + .alg = "ecdh-nist-p192", .test = alg_test_kpp, .fips_allowed = 1, .suite = { - .kpp = __VECS(ecdh_tv_template) + .kpp = __VECS(ecdh_p192_tv_template) + } + }, { +#endif + .alg = "ecdh-nist-p256", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = __VECS(ecdh_p256_tv_template) + } + }, { + .alg = "ecdsa-nist-p192", + .test = alg_test_akcipher, + .suite = { + .akcipher = __VECS(ecdsa_nist_p192_tv_template) + } + }, { + .alg = "ecdsa-nist-p256", + .test = alg_test_akcipher, + .suite = { + .akcipher = __VECS(ecdsa_nist_p256_tv_template) + } + }, { + .alg = "ecdsa-nist-p384", + .test = alg_test_akcipher, + .suite = { + .akcipher = __VECS(ecdsa_nist_p384_tv_template) } }, { .alg = "ecrdsa", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index ced56ea0c9b4..34e4a3db3991 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -566,6 +566,430 @@ static const struct akcipher_testvec rsa_tv_template[] = { } }; +/* + * ECDSA test vectors. 
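+ *
+ * Each vector carries an uncompressed public key (0x04 || X || Y), the
+ * DER-encoded AlgorithmIdentifier parameters (id-ecPublicKey plus the
+ * named-curve OID), a message hash 'm' and a DER-encoded
+ * ECDSA-Sig-Value 'c' holding r and s.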
+ */ +static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = { + { + .key = + "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1" + "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68" + "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08" + "\x98", + .key_len = 49, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x01", + .param_len = 21, + .m = + "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae" + "\x63\x85\xe7\x82", + .m_size = 20, + .algo = OID_id_ecdsa_with_sha1, + .c = + "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91" + "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10" + "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86" + "\x80\x6f\xa5\x79\x77\xda\xd0", + .c_size = 55, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = + "\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd" + "\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75" + "\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee" + "\xa3", + .key_len = 49, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x01", + .param_len = 21, + .m = + "\x8d\xd6\xb8\x3e\xe5\xff\x23\xf6\x25\xa2\x43\x42\x74\x45\xa7\x40" + "\x3a\xff\x2f\xe1\xd3\xf6\x9f\xe8\x33\xcb\x12\x11", + .m_size = 28, + .algo = OID_id_ecdsa_with_sha224, + .c = + "\x30\x34\x02\x18\x5a\x8b\x82\x69\x7e\x8a\x0a\x09\x14\xf8\x11\x2b" + "\x55\xdc\xae\x37\x83\x7b\x12\xe6\xb6\x5b\xcb\xd4\x02\x18\x6a\x14" + "\x4f\x53\x75\xc8\x02\x48\xeb\xc3\x92\x0f\x1e\x72\xee\xc4\xa3\xe3" + "\x5c\x99\xdb\x92\x5b\x36", + .c_size = 54, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = + "\x04\xe2\x51\x24\x9b\xf7\xb6\x32\x82\x39\x66\x3d\x5b\xec\x3b\xae" + "\x0c\xd5\xf2\x67\xd1\xc7\xe1\x02\xe4\xbf\x90\x62\xb8\x55\x75\x56" + "\x69\x20\x5e\xcb\x4e\xca\x33\xd6\xcb\x62\x6b\x94\xa9\xa2\xe9\x58" + "\x91", + .key_len = 49, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x01", + .param_len = 21, + .m = + "\x35\xec\xa1\xa0\x9e\x14\xde\x33\x03\xb6\xf6\xbd\x0c\x2f\xb2\xfd" + "\x1f\x27\x82\xa5\xd7\x70\x3f\xef\xa0\x82\x69\x8e\x73\x31\x8e\xd7", + .m_size = 32, + .algo = OID_id_ecdsa_with_sha256, + .c = + "\x30\x35\x02\x18\x3f\x72\x3f\x1f\x42\xd2\x3f\x1d\x6b\x1a\x58\x56" + "\xf1\x8f\xf7\xfd\x01\x48\xfb\x5f\x72\x2a\xd4\x8f\x02\x19\x00\xb3" + "\x69\x43\xfd\x48\x19\x86\xcf\x32\xdd\x41\x74\x6a\x51\xc7\xd9\x7d" + "\x3a\x97\xd9\xcd\x1a\x6a\x49", + .c_size = 55, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = + "\x04\x5a\x13\xfe\x68\x86\x4d\xf4\x17\xc7\xa4\xe5\x8c\x65\x57\xb7" + "\x03\x73\x26\x57\xfb\xe5\x58\x40\xd8\xfd\x49\x05\xab\xf1\x66\x1f" + "\xe2\x9d\x93\x9e\xc2\x22\x5a\x8b\x4f\xf3\x77\x22\x59\x7e\xa6\x4e" + "\x8b", + .key_len = 49, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x01", + .param_len = 21, + .m = + "\x9d\x2e\x1a\x8f\xed\x6c\x4b\x61\xae\xac\xd5\x19\x79\xce\x67\xf9" + "\xa0\x34\xeb\xb0\x81\xf9\xd9\xdc\x6e\xb3\x5c\xa8\x69\xfc\x8a\x61" + "\x39\x81\xfb\xfd\x5c\x30\x6b\xa8\xee\xed\x89\xaf\xa3\x05\xe4\x78", + .m_size = 48, + .algo = OID_id_ecdsa_with_sha384, + .c = + "\x30\x35\x02\x19\x00\xf0\xa3\x38\xce\x2b\xf8\x9d\x1a\xcf\x7f\x34" + "\xb4\xb4\xe5\xc5\x00\xdd\x15\xbb\xd6\x8c\xa7\x03\x78\x02\x18\x64" + "\xbc\x5a\x1f\x82\x96\x61\xd7\xd1\x01\x77\x44\x5d\x53\xa4\x7c\x93" + 
"\x12\x3b\x3b\x28\xfb\x6d\xe1", + .c_size = 55, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = + "\x04\xd5\xf2\x6e\xc3\x94\x5c\x52\xbc\xdf\x86\x6c\x14\xd1\xca\xea" + "\xcc\x72\x3a\x8a\xf6\x7a\x3a\x56\x36\x3b\xca\xc6\x94\x0e\x17\x1d" + "\x9e\xa0\x58\x28\xf9\x4b\xe6\xd1\xa5\x44\x91\x35\x0d\xe7\xf5\x11" + "\x57", + .key_len = 49, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x01", + .param_len = 21, + .m = + "\xd5\x4b\xe9\x36\xda\xd8\x6e\xc0\x50\x03\xbe\x00\x43\xff\xf0\x23" + "\xac\xa2\x42\xe7\x37\x77\x79\x52\x8f\x3e\xc0\x16\xc1\xfc\x8c\x67" + "\x16\xbc\x8a\x5d\x3b\xd3\x13\xbb\xb6\xc0\x26\x1b\xeb\x33\xcc\x70" + "\x4a\xf2\x11\x37\xe8\x1b\xba\x55\xac\x69\xe1\x74\x62\x7c\x6e\xb5", + .m_size = 64, + .algo = OID_id_ecdsa_with_sha512, + .c = + "\x30\x35\x02\x19\x00\x88\x5b\x8f\x59\x43\xbf\xcf\xc6\xdd\x3f\x07" + "\x87\x12\xa0\xd4\xac\x2b\x11\x2d\x1c\xb6\x06\xc9\x6c\x02\x18\x73" + "\xb4\x22\x9a\x98\x73\x3c\x83\xa9\x14\x2a\x5e\xf5\xe5\xfb\x72\x28" + "\x6a\xdf\x97\xfd\x82\x76\x24", + .c_size = 55, + .public_key_vec = true, + .siggen_sigver_test = true, + }, +}; + +static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = { + { + .key = + "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41" + "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9" + "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc" + "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8" + "\xaf", + .key_len = 65, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x07", + .param_len = 21, + .m = + "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c" + "\x0b\xde\x6a\x42", + .m_size = 20, + .algo = OID_id_ecdsa_with_sha1, + .c = + "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7" + "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a" + "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d" + "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7" + "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad", + .c_size = 72, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = + "\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9" + "\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0" + "\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88" + "\x43\x13\xbf\xf3\x1c\x05\x1a\x14\x18\x09\x3f\xd6\x28\x3e\xc5\xa0" + "\xd4", + .key_len = 65, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x07", + .param_len = 21, + .m = + "\x1a\x15\xbc\xa3\xe4\xed\x3a\xb8\x23\x67\xc6\xc4\x34\xf8\x6c\x41" + "\x04\x0b\xda\xc5\x77\xfa\x1c\x2d\xe6\x2c\x3b\xe0", + .m_size = 28, + .algo = OID_id_ecdsa_with_sha224, + .c = + "\x30\x44\x02\x20\x20\x43\xfa\xc0\x9f\x9d\x7b\xe7\xae\xce\x77\x59" + "\x1a\xdb\x59\xd5\x34\x62\x79\xcb\x6a\x91\x67\x2e\x7d\x25\xd8\x25" + "\xf5\x81\xd2\x1e\x02\x20\x5f\xf8\x74\xf8\x57\xd0\x5e\x54\x76\x20" + "\x4a\x77\x22\xec\xc8\x66\xbf\x50\x05\x58\x39\x0e\x26\x92\xce\xd5" + "\x2e\x8b\xde\x5a\x04\x0e", + .c_size = 70, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = + "\x04\xf1\xea\xc4\x53\xf3\xb9\x0e\x9f\x7e\xad\xe3\xea\xd7\x0e\x0f" + "\xd6\x98\x9a\xca\x92\x4d\x0a\x80\xdb\x2d\x45\xc7\xec\x4b\x97\x00" + "\x2f\xe9\x42\x6c\x29\xdc\x55\x0e\x0b\x53\x12\x9b\x2b\xad\x2c\xe9" + "\x80\xe6\xc5\x43\xc2\x1d\x5e\xbb\x65\x21\x50\xb6\x37\xb0\x03\x8e" + "\xb8", + .key_len = 65, + .params = + 
"\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x07", + .param_len = 21, + .m = + "\x8f\x43\x43\x46\x64\x8f\x6b\x96\xdf\x89\xdd\xa9\x01\xc5\x17\x6b" + "\x10\xa6\xd8\x39\x61\xdd\x3c\x1a\xc8\x8b\x59\xb2\xdc\x32\x7a\xa4", + .m_size = 32, + .algo = OID_id_ecdsa_with_sha256, + .c = + "\x30\x45\x02\x20\x08\x31\xfa\x74\x0d\x1d\x21\x5d\x09\xdc\x29\x63" + "\xa8\x1a\xad\xfc\xac\x44\xc3\xe8\x24\x11\x2d\xa4\x91\xdc\x02\x67" + "\xdc\x0c\xd0\x82\x02\x21\x00\xbd\xff\xce\xee\x42\xc3\x97\xff\xf9" + "\xa9\x81\xac\x4a\x50\xd0\x91\x0a\x6e\x1b\xc4\xaf\xe1\x83\xc3\x4f" + "\x2a\x65\x35\x23\xe3\x1d\xfa", + .c_size = 71, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = + "\x04\xc5\xc6\xea\x60\xc9\xce\xad\x02\x8d\xf5\x3e\x24\xe3\x52\x1d" + "\x28\x47\x3b\xc3\x6b\xa4\x99\x35\x99\x11\x88\x88\xc8\xf4\xee\x7e" + "\x8c\x33\x8f\x41\x03\x24\x46\x2b\x1a\x82\xf9\x9f\xe1\x97\x1b\x00" + "\xda\x3b\x24\x41\xf7\x66\x33\x58\x3d\x3a\x81\xad\xcf\x16\xe9\xe2" + "\x7c", + .key_len = 65, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x07", + .param_len = 21, + .m = + "\x3e\x78\x70\xfb\xcd\x66\xba\x91\xa1\x79\xff\x1e\x1c\x6b\x78\xe6" + "\xc0\x81\x3a\x65\x97\x14\x84\x36\x14\x1a\x9a\xb7\xc5\xab\x84\x94" + "\x5e\xbb\x1b\x34\x71\xcb\x41\xe1\xf6\xfc\x92\x7b\x34\xbb\x86\xbb", + .m_size = 48, + .algo = OID_id_ecdsa_with_sha384, + .c = + "\x30\x46\x02\x21\x00\x8e\xf3\x6f\xdc\xf8\x69\xa6\x2e\xd0\x2e\x95" + "\x54\xd1\x95\x64\x93\x08\xb2\x6b\x24\x94\x48\x46\x5e\xf2\xe4\x6c" + "\xc7\x94\xb1\xd5\xfe\x02\x21\x00\xeb\xa7\x80\x26\xdc\xf9\x3a\x44" + "\x19\xfb\x5f\x92\xf4\xc9\x23\x37\x69\xf4\x3b\x4f\x47\xcf\x9b\x16" + "\xc0\x60\x11\x92\xdc\x17\x89\x12", + .c_size = 72, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = + "\x04\xd7\x27\x46\x49\xf6\x26\x85\x12\x40\x76\x8e\xe2\xe6\x2a\x7a" + "\x83\xb1\x4e\x7a\xeb\x3b\x5c\x67\x4a\xb5\xa4\x92\x8c\x69\xff\x38" + "\xee\xd9\x4e\x13\x29\x59\xad\xde\x6b\xbb\x45\x31\xee\xfd\xd1\x1b" + "\x64\xd3\xb5\xfc\xaf\x9b\x4b\x88\x3b\x0e\xb7\xd6\xdf\xf1\xd5\x92" + "\xbf", + .key_len = 65, + .params = + "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48" + "\xce\x3d\x03\x01\x07", + .param_len = 21, + .m = + "\x57\xb7\x9e\xe9\x05\x0a\x8c\x1b\xc9\x13\xe5\x4a\x24\xc7\xe2\xe9" + "\x43\xc3\xd1\x76\x62\xf4\x98\x1a\x9c\x13\xb0\x20\x1b\xe5\x39\xca" + "\x4f\xd9\x85\x34\x95\xa2\x31\xbc\xbb\xde\xdd\x76\xbb\x61\xe3\xcf" + "\x9d\xc0\x49\x7a\xf3\x7a\xc4\x7d\xa8\x04\x4b\x8d\xb4\x4d\x5b\xd6", + .m_size = 64, + .algo = OID_id_ecdsa_with_sha512, + .c = + "\x30\x45\x02\x21\x00\xb8\x6d\x87\x81\x43\xdf\xfb\x9f\x40\xea\x44" + "\x81\x00\x4e\x29\x08\xed\x8c\x73\x30\x6c\x22\xb3\x97\x76\xf6\x04" + "\x99\x09\x37\x4d\xfa\x02\x20\x1e\xb9\x75\x31\xf6\x04\xa5\x4d\xf8" + "\x00\xdd\xab\xd4\xc0\x2b\xe6\x5c\xad\xc3\x78\x1c\xc2\xc1\x19\x76" + "\x31\x79\x4a\xe9\x81\x6a\xee", + .c_size = 71, + .public_key_vec = true, + .siggen_sigver_test = true, + }, +}; + +static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = { + { + .key = /* secp384r1(sha1) */ + "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1" + "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f" + "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42" + "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e" + "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e" + "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3" + "\xf1", + .key_len = 97, + .params = + 
"\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" + "\x00\x22", + .param_len = 18, + .m = + "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22" + "\x3a\x69\xc1\x93", + .m_size = 20, + .algo = OID_id_ecdsa_with_sha1, + .c = + "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07" + "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1" + "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e" + "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0" + "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88" + "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26" + "\x79\x12\x2a\xb7\xc5\x15\x92\xc5", + .c_size = 104, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = /* secp384r1(sha224) */ + "\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0" + "\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18" + "\x3e\xe8\x29\x6e\xf6\xe4\xb5\x8e\xc7\x4a\xc2\x5f\x37\x13\x99\x05" + "\xb6\xa4\x9d\xf9\xfb\x79\x41\xe7\xd7\x96\x9f\x73\x3b\x39\x43\xdc" + "\xda\xf4\x06\xb9\xa5\x29\x01\x9d\x3b\xe1\xd8\x68\x77\x2a\xf4\x50" + "\x6b\x93\x99\x6c\x66\x4c\x42\x3f\x65\x60\x6c\x1c\x0b\x93\x9b\x9d" + "\xe0", + .key_len = 97, + .params = + "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" + "\x00\x22", + .param_len = 18, + .m = + "\x12\x80\xb6\xeb\x25\xe2\x3d\xf0\x21\x32\x96\x17\x3a\x38\x39\xfd" + "\x1f\x05\x34\x7b\xb8\xf9\x71\x66\x03\x4f\xd5\xe5", + .m_size = 28, + .algo = OID_id_ecdsa_with_sha224, + .c = + "\x30\x66\x02\x31\x00\x8a\x51\x84\xce\x13\x1e\xd2\xdc\xec\xcb\xe4" + "\x89\x47\xb2\xf7\xbc\x97\xf1\xc8\x72\x26\xcf\x5a\x5e\xc5\xda\xb4" + "\xe3\x93\x07\xe0\x99\xc9\x9c\x11\xb8\x10\x01\xc5\x41\x3f\xdd\x15" + "\x1b\x68\x2b\x9d\x8b\x02\x31\x00\x8b\x03\x2c\xfc\x1f\xd1\xa9\xa4" + "\x4b\x00\x08\x31\x6c\xf5\xd5\xf6\xdf\xd8\x68\xa2\x64\x42\x65\xf3" + "\x4d\xd0\xc6\x6e\xb0\xe9\xfc\x14\x9f\x19\xd0\x42\x8b\x93\xc2\x11" + "\x88\x2b\x82\x26\x5e\x1c\xda\xfb", + .c_size = 104, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = /* secp384r1(sha256) */ + "\x04\xee\xd6\xda\x3e\x94\x90\x00\x27\xed\xf8\x64\x55\xd6\x51\x9a" + "\x1f\x52\x00\x63\x78\xf1\xa9\xfd\x75\x4c\x9e\xb2\x20\x1a\x91\x5a" + "\xba\x7a\xa3\xe5\x6c\xb6\x25\x68\x4b\xe8\x13\xa6\x54\x87\x2c\x0e" + "\xd0\x83\x95\xbc\xbf\xc5\x28\x4f\x77\x1c\x46\xa6\xf0\xbc\xd4\xa4" + "\x8d\xc2\x8f\xb3\x32\x37\x40\xd6\xca\xf8\xae\x07\x34\x52\x39\x52" + "\x17\xc3\x34\x29\xd6\x40\xea\x5c\xb9\x3f\xfb\x32\x2e\x12\x33\xbc" + "\xab", + .key_len = 97, + .params = + "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" + "\x00\x22", + .param_len = 18, + .m = + "\xaa\xe7\xfd\x03\x26\xcb\x94\x71\xe4\xce\x0f\xc5\xff\xa6\x29\xa3" + "\xe1\xcc\x4c\x35\x4e\xde\xca\x80\xab\x26\x0c\x25\xe6\x68\x11\xc2", + .m_size = 32, + .algo = OID_id_ecdsa_with_sha256, + .c = + "\x30\x64\x02\x30\x08\x09\x12\x9d\x6e\x96\x64\xa6\x8e\x3f\x7e\xce" + "\x0a\x9b\xaa\x59\xcc\x47\x53\x87\xbc\xbd\x83\x3f\xaf\x06\x3f\x84" + "\x04\xe2\xf9\x67\xb6\xc6\xfc\x70\x2e\x66\x3c\x77\xc8\x8d\x2c\x79" + "\x3a\x8e\x32\xc4\x02\x30\x40\x34\xb8\x90\xa9\x80\xab\x47\x26\xa2" + "\xb0\x89\x42\x0a\xda\xd9\xdd\xce\xbc\xb2\x97\xf4\x9c\xf3\x15\x68" + "\xc0\x75\x3e\x23\x5e\x36\x4f\x8d\xde\x1e\x93\x8d\x95\xbb\x10\x0e" + "\xf4\x1f\x39\xca\x4d\x43", + .c_size = 102, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = /* secp384r1(sha384) */ + "\x04\x3a\x2f\x62\xe7\x1a\xcf\x24\xd0\x0b\x7c\xe0\xed\x46\x0a\x4f" + 
"\x74\x16\x43\xe9\x1a\x25\x7c\x55\xff\xf0\x29\x68\x66\x20\x91\xf9" + "\xdb\x2b\xf6\xb3\x6c\x54\x01\xca\xc7\x6a\x5c\x0d\xeb\x68\xd9\x3c" + "\xf1\x01\x74\x1f\xf9\x6c\xe5\x5b\x60\xe9\x7f\x5d\xb3\x12\x80\x2a" + "\xd8\x67\x92\xc9\x0e\x4c\x4c\x6b\xa1\xb2\xa8\x1e\xac\x1c\x97\xd9" + "\x21\x67\xe5\x1b\x5a\x52\x31\x68\xd6\xee\xf0\x19\xb0\x55\xed\x89" + "\x9e", + .key_len = 97, + .params = + "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" + "\x00\x22", + .param_len = 18, + .m = + "\x8d\xf2\xc0\xe9\xa8\xf3\x8e\x44\xc4\x8c\x1a\xa0\xb8\xd7\x17\xdf" + "\xf2\x37\x1b\xc6\xe3\xf5\x62\xcc\x68\xf5\xd5\x0b\xbf\x73\x2b\xb1" + "\xb0\x4c\x04\x00\x31\xab\xfe\xc8\xd6\x09\xc8\xf2\xea\xd3\x28\xff", + .m_size = 48, + .algo = OID_id_ecdsa_with_sha384, + .c = + "\x30\x66\x02\x31\x00\x9b\x28\x68\xc0\xa1\xea\x8c\x50\xee\x2e\x62" + "\x35\x46\xfa\x00\xd8\x2d\x7a\x91\x5f\x49\x2d\x22\x08\x29\xe6\xfb" + "\xca\x8c\xd6\xb6\xb4\x3b\x1f\x07\x8f\x15\x02\xfe\x1d\xa2\xa4\xc8" + "\xf2\xea\x9d\x11\x1f\x02\x31\x00\xfc\x50\xf6\x43\xbd\x50\x82\x0e" + "\xbf\xe3\x75\x24\x49\xac\xfb\xc8\x71\xcd\x8f\x18\x99\xf0\x0f\x13" + "\x44\x92\x8c\x86\x99\x65\xb3\x97\x96\x17\x04\xc9\x05\x77\xf1\x8e" + "\xab\x8d\x4e\xde\xe6\x6d\x9b\x66", + .c_size = 104, + .public_key_vec = true, + .siggen_sigver_test = true, + }, { + .key = /* secp384r1(sha512) */ + "\x04\xb4\xe7\xc1\xeb\x64\x25\x22\x46\xc3\x86\x61\x80\xbe\x1e\x46" + "\xcb\xf6\x05\xc2\xee\x73\x83\xbc\xea\x30\x61\x4d\x40\x05\x41\xf4" + "\x8c\xe3\x0e\x5c\xf0\x50\xf2\x07\x19\xe8\x4f\x25\xbe\xee\x0c\x95" + "\x54\x36\x86\xec\xc2\x20\x75\xf3\x89\xb5\x11\xa1\xb7\xf5\xaf\xbe" + "\x81\xe4\xc3\x39\x06\xbd\xe4\xfe\x68\x1c\x6d\x99\x2b\x1b\x63\xfa" + "\xdf\x42\x5c\xc2\x5a\xc7\x0c\xf4\x15\xf7\x1b\xa3\x2e\xd7\x00\xac" + "\xa3", + .key_len = 97, + .params = + "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04" + "\x00\x22", + .param_len = 18, + .m = + "\xe8\xb7\x52\x7d\x1a\x44\x20\x05\x53\x6b\x3a\x68\xf2\xe7\x6c\xa1" + "\xae\x9d\x84\xbb\xba\x52\x43\x3e\x2c\x42\x78\x49\xbf\x78\xb2\x71" + "\xeb\xe1\xe0\xe8\x42\x7b\x11\xad\x2b\x99\x05\x1d\x36\xe6\xac\xfc" + "\x55\x73\xf0\x15\x63\x39\xb8\x6a\x6a\xc5\x91\x5b\xca\x6a\xa8\x0e", + .m_size = 64, + .algo = OID_id_ecdsa_with_sha512, + .c = + "\x30\x63\x02\x2f\x1d\x20\x94\x77\xfe\x31\xfa\x4d\xc6\xef\xda\x02" + "\xe7\x0f\x52\x9a\x02\xde\x93\xe8\x83\xe4\x84\x4c\xfc\x6f\x80\xe3" + "\xaf\xb3\xd9\xdc\x2b\x43\x0e\x6a\xb3\x53\x6f\x3e\xb3\xc7\xa8\xb3" + "\x17\x77\xd1\x02\x30\x63\xf6\xf0\x3d\x5f\x5f\x99\x3f\xde\x3a\x3d" + "\x16\xaf\xb4\x52\x6a\xec\x63\xe3\x0c\xec\x50\xdc\xcc\xc4\x6a\x03" + "\x5f\x8d\x7a\xf9\xfb\x34\xe4\x8b\x80\xa5\xb6\xda\x2c\x4e\x45\xcf" + "\x3c\x93\xff\x50\x5d", + .c_size = 101, + .public_key_vec = true, + .siggen_sigver_test = true, + }, +}; + /* * EC-RDSA test vectors are generated by gost-engine. 
*/ @@ -2261,19 +2685,17 @@ static const struct kpp_testvec curve25519_tv_template[] = { } }; -static const struct kpp_testvec ecdh_tv_template[] = { - { #ifndef CONFIG_CRYPTO_FIPS +static const struct kpp_testvec ecdh_p192_tv_template[] = { + { .secret = #ifdef __LITTLE_ENDIAN "\x02\x00" /* type */ - "\x20\x00" /* len */ - "\x01\x00" /* curve_id */ + "\x1e\x00" /* len */ "\x18\x00" /* key_size */ #else "\x00\x02" /* type */ - "\x00\x20" /* len */ - "\x00\x01" /* curve_id */ + "\x00\x1e" /* len */ "\x00\x18" /* key_size */ #endif "\xb5\x05\xb1\x71\x1e\xbf\x8c\xda" @@ -2301,18 +2723,20 @@ static const struct kpp_testvec ecdh_tv_template[] = { .b_public_size = 48, .expected_a_public_size = 48, .expected_ss_size = 24 - }, { + } +}; #endif + +static const struct kpp_testvec ecdh_p256_tv_template[] = { + { .secret = #ifdef __LITTLE_ENDIAN "\x02\x00" /* type */ - "\x28\x00" /* len */ - "\x02\x00" /* curve_id */ + "\x26\x00" /* len */ "\x20\x00" /* key_size */ #else "\x00\x02" /* type */ - "\x00\x28" /* len */ - "\x00\x02" /* curve_id */ + "\x00\x26" /* len */ "\x00\x20" /* key_size */ #endif "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83" @@ -2350,25 +2774,21 @@ static const struct kpp_testvec ecdh_tv_template[] = { .secret = #ifdef __LITTLE_ENDIAN "\x02\x00" /* type */ - "\x08\x00" /* len */ - "\x02\x00" /* curve_id */ + "\x06\x00" /* len */ "\x00\x00", /* key_size */ #else "\x00\x02" /* type */ - "\x00\x08" /* len */ - "\x00\x02" /* curve_id */ + "\x00\x06" /* len */ "\x00\x00", /* key_size */ #endif .b_secret = #ifdef __LITTLE_ENDIAN "\x02\x00" /* type */ - "\x28\x00" /* len */ - "\x02\x00" /* curve_id */ + "\x26\x00" /* len */ "\x20\x00" /* key_size */ #else "\x00\x02" /* type */ - "\x00\x28" /* len */ - "\x00\x02" /* curve_id */ + "\x00\x26" /* len */ "\x00\x20" /* key_size */ #endif "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83" diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c index 410b50b05e21..5b7ca0416490 100644 --- a/drivers/char/hw_random/ba431-rng.c +++ b/drivers/char/hw_random/ba431-rng.c @@ -170,7 +170,6 @@ static int ba431_trng_init(struct hwrng *rng) static int ba431_trng_probe(struct platform_device *pdev) { struct ba431_trng *ba431; - struct resource *res; int ret; ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL); @@ -179,8 +178,7 @@ static int ba431_trng_probe(struct platform_device *pdev) ba431->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ba431->base = devm_ioremap_resource(&pdev->dev, res); + ba431->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ba431->base)) return PTR_ERR(ba431->base); @@ -193,7 +191,7 @@ static int ba431_trng_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ba431); - ret = hwrng_register(&ba431->rng); + ret = devm_hwrng_register(&pdev->dev, &ba431->rng); if (ret) { dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret); return ret; @@ -204,15 +202,6 @@ static int ba431_trng_probe(struct platform_device *pdev) return 0; } -static int ba431_trng_remove(struct platform_device *pdev) -{ - struct ba431_trng *ba431 = platform_get_drvdata(pdev); - - hwrng_unregister(&ba431->rng); - - return 0; -} - static const struct of_device_id ba431_trng_dt_ids[] = { { .compatible = "silex-insight,ba431-rng", .data = NULL }, { /* sentinel */ } @@ -225,7 +214,6 @@ static struct platform_driver ba431_trng_driver = { .of_match_table = ba431_trng_dt_ids, }, .probe = ba431_trng_probe, - .remove = ba431_trng_remove, }; module_platform_driver(ba431_trng_driver); diff --git 
a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index 1a7c43b43c6b..e7dd457e9b22 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -13,6 +13,7 @@ #include #include #include +#include #define RNG_CTRL 0x0 #define RNG_STATUS 0x4 @@ -32,6 +33,7 @@ struct bcm2835_rng_priv { void __iomem *base; bool mask_interrupts; struct clk *clk; + struct reset_control *reset; }; static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng) @@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng) int ret = 0; u32 val; - if (!IS_ERR(priv->clk)) { - ret = clk_prepare_enable(priv->clk); - if (ret) - return ret; - } + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = reset_control_reset(priv->reset); + if (ret) + return ret; if (priv->mask_interrupts) { /* mask the interrupt */ @@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng) /* disable rng hardware */ rng_writel(priv, 0, RNG_CTRL); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); } struct bcm2835_rng_of_data { @@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev) return PTR_ERR(priv->base); /* Clock is optional on most platforms */ - priv->clk = devm_clk_get(dev, NULL); - if (PTR_ERR(priv->clk) == -EPROBE_DEFER) - return -EPROBE_DEFER; + priv->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(priv->reset)) + return PTR_ERR(priv->reset); priv->rng.name = pdev->name; priv->rng.init = bcm2835_rng_init; diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 7a293f2147a0..302ffa354c2f 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -486,7 +486,6 @@ static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata) static int cctrng_probe(struct platform_device *pdev) { - struct resource *req_mem_cc_regs = NULL; struct cctrng_drvdata *drvdata; struct device *dev = &pdev->dev; int rc = 0; @@ -510,27 +509,16 @@ static int cctrng_probe(struct platform_device *pdev) drvdata->circ.buf = (char *)drvdata->data_buf; - /* Get device resources */ - /* First CC registers space */ - req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - /* Map registers space */ - drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs); + drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(drvdata->cc_base)) { dev_err(dev, "Failed to ioremap registers"); return PTR_ERR(drvdata->cc_base); } - dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name, - req_mem_cc_regs); - dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n", - &req_mem_cc_regs->start, drvdata->cc_base); - /* Then IRQ */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Failed getting IRQ resource\n"); + if (irq < 0) return irq; - } /* parse sampling rate from device tree */ rc = cc_trng_parse_sampling_ratio(drvdata); @@ -585,7 +573,7 @@ static int cctrng_probe(struct platform_device *pdev) atomic_set(&drvdata->pending_hw, 1); /* registration of the hwrng device */ - rc = hwrng_register(&drvdata->rng); + rc = devm_hwrng_register(dev, &drvdata->rng); if (rc) { dev_err(dev, "Could not register hwrng device.\n"); goto post_pm_err; @@ -618,8 +606,6 @@ static int cctrng_remove(struct platform_device *pdev) dev_dbg(dev, "Releasing cctrng resources...\n"); - 
hwrng_unregister(&drvdata->rng); - cc_trng_pm_fini(drvdata); cc_trng_clk_fini(drvdata); diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 8c1c47dd9f46..adb3c2bd7783 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -396,7 +396,7 @@ static ssize_t hwrng_attr_selected_show(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user); + return sysfs_emit(buf, "%d\n", cur_rng_set_by_user); } static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR, diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c index eb7db27f9f19..d740b8814bf3 100644 --- a/drivers/char/hw_random/intel-rng.c +++ b/drivers/char/hw_random/intel-rng.c @@ -25,13 +25,13 @@ */ #include +#include #include #include #include #include #include #include -#include #define PFX KBUILD_MODNAME ": " diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 5cc5fc504968..cede9f159102 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -30,8 +30,7 @@ #include #include #include - -#include +#include #define RNG_REG_STATUS_RDY (1 << 0) @@ -378,16 +377,13 @@ MODULE_DEVICE_TABLE(of, omap_rng_of_match); static int of_get_omap_rng_device_details(struct omap_rng_dev *priv, struct platform_device *pdev) { - const struct of_device_id *match; struct device *dev = &pdev->dev; int irq, err; - match = of_match_device(of_match_ptr(omap_rng_of_match), dev); - if (!match) { - dev_err(dev, "no compatible OF match\n"); - return -EINVAL; - } - priv->pdata = match->data; + priv->pdata = of_device_get_match_data(dev); + if (!priv->pdata) + return -ENODEV; + if (of_device_is_compatible(dev->of_node, "ti,omap4-rng") || of_device_is_compatible(dev->of_node, "inside-secure,safexcel-eip76")) { diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index e8210c1715cf..99c8bd0859a1 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev) priv->rng.name = pdev->name; priv->rng.read = pic32_rng_read; - ret = hwrng_register(&priv->rng); + ret = devm_hwrng_register(&pdev->dev, &priv->rng); if (ret) goto err_register; @@ -113,7 +113,6 @@ static int pic32_rng_remove(struct platform_device *pdev) { struct pic32_rng *rng = platform_get_drvdata(pdev); - hwrng_unregister(&rng->rng); writel(0, rng->base + RNGCON); clk_disable_unprepare(rng->clk); return 0; diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c index 7bdab8c8a6a8..2a9fea72b2e0 100644 --- a/drivers/char/hw_random/xiphera-trng.c +++ b/drivers/char/hw_random/xiphera-trng.c @@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev) int ret; struct xiphera_trng *trng; struct device *dev = &pdev->dev; - struct resource *res; trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - trng->mem = devm_ioremap_resource(dev, res); + trng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->mem)) return PTR_ERR(trng->mem); diff --git a/drivers/char/random.c b/drivers/char/random.c index 0fe9e200e4c8..605969ed0f96 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -500,7 +500,6 @@ struct entropy_store { unsigned short add_ptr; unsigned short input_rotate; int entropy_count; - unsigned int initialized:1; 
unsigned int last_data_init:1; __u8 last_data[EXTRACT_SIZE]; }; @@ -660,7 +659,7 @@ static void process_random_ready_list(void) */ static void credit_entropy_bits(struct entropy_store *r, int nbits) { - int entropy_count, orig, has_initialized = 0; + int entropy_count, orig; const int pool_size = r->poolinfo->poolfracbits; int nfrac = nbits << ENTROPY_SHIFT; @@ -717,23 +716,14 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) goto retry; - if (has_initialized) { - r->initialized = 1; - kill_fasync(&fasync, SIGIO, POLL_IN); - } - trace_credit_entropy_bits(r->name, nbits, entropy_count >> ENTROPY_SHIFT, _RET_IP_); if (r == &input_pool) { int entropy_bits = entropy_count >> ENTROPY_SHIFT; - if (crng_init < 2) { - if (entropy_bits < 128) - return; + if (crng_init < 2 && entropy_bits >= 128) crng_reseed(&primary_crng, r); - entropy_bits = ENTROPY_BITS(r); - } } } @@ -819,7 +809,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng) static void __maybe_unused crng_initialize_secondary(struct crng_state *crng) { - memcpy(&crng->state[0], "expand 32-byte k", 16); + chacha_init_consts(crng->state); _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); crng_init_try_arch(crng); crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; @@ -827,7 +817,7 @@ static void __maybe_unused crng_initialize_secondary(struct crng_state *crng) static void __init crng_initialize_primary(struct crng_state *crng) { - memcpy(&crng->state[0], "expand 32-byte k", 16); + chacha_init_consts(crng->state); _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); if (crng_init_try_arch_early(crng) && trust_cpu) { invalidate_batched_entropy(); @@ -1372,8 +1362,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, } /* - * This function does the actual extraction for extract_entropy and - * extract_entropy_user. + * This function does the actual extraction for extract_entropy. * * Note: we assume that .poolwords is a multiple of 16 words. */ diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig index 856fb2045656..b8e75210a0e3 100644 --- a/drivers/crypto/allwinner/Kconfig +++ b/drivers/crypto/allwinner/Kconfig @@ -71,10 +71,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG config CRYPTO_DEV_SUN8I_CE_HASH bool "Enable support for hash on sun8i-ce" depends on CRYPTO_DEV_SUN8I_CE - select MD5 - select SHA1 - select SHA256 - select SHA512 + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 help Say y to enable support for hash algorithms. @@ -132,8 +132,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG config CRYPTO_DEV_SUN8I_SS_HASH bool "Enable support for hash on sun8i-ss" depends on CRYPTO_DEV_SUN8I_SS - select MD5 - select SHA1 - select SHA256 + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 help Say y to enable support for hash algorithms. 
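The pm_runtime conversions in the allwinner diffs that follow are more than cosmetic: pm_runtime_get_sync() increments the device usage counter even when the resume fails, so every error path has to drop the reference with pm_runtime_put_noidle(), while pm_runtime_resume_and_get() already does that itself on failure. A minimal sketch of the two patterns, assuming only the standard <linux/pm_runtime.h> API; example_init() and its device are hypothetical and only for illustration:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_init(struct device *dev)
{
	int err;

	/*
	 * Old pattern: pm_runtime_get_sync() takes a usage reference even
	 * on failure, so returning the error without put_noidle() would
	 * leak it.
	 */
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* required, easy to forget */
		return err;
	}
	pm_runtime_put(dev);

	/*
	 * New pattern: on failure the helper has already dropped the
	 * reference, so a bare error return is correct.
	 */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;
	pm_runtime_put(dev);

	return 0;
}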
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index c2e6f5ed1d79..dec79fa3ebaf 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -561,7 +561,7 @@ int sun4i_ss_cipher_init(struct crypto_tfm *tfm) sizeof(struct sun4i_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm)); - err = pm_runtime_get_sync(op->ss->dev); + err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) goto error_pm; diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c index 709905ec4680..44b8fc4b786d 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c @@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev) { struct sun4i_ss_ctx *ss = dev_get_drvdata(dev); - if (ss->reset) - reset_control_assert(ss->reset); + reset_control_assert(ss->reset); clk_disable_unprepare(ss->ssclk); clk_disable_unprepare(ss->busclk); @@ -314,12 +313,10 @@ static int sun4i_ss_pm_resume(struct device *dev) goto err_enable; } - if (ss->reset) { - err = reset_control_deassert(ss->reset); - if (err) { - dev_err(ss->dev, "Cannot deassert reset control\n"); - goto err_enable; - } + err = reset_control_deassert(ss->reset); + if (err) { + dev_err(ss->dev, "Cannot deassert reset control\n"); + goto err_enable; } return err; @@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "clock ahb_ss acquired\n"); ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); - if (IS_ERR(ss->reset)) { - if (PTR_ERR(ss->reset) == -EPROBE_DEFER) - return PTR_ERR(ss->reset); + if (IS_ERR(ss->reset)) + return PTR_ERR(ss->reset); + if (!ss->reset) dev_info(&pdev->dev, "no reset control found\n"); - ss->reset = NULL; - } /* * Check that clock have the correct rates given in the datasheet @@ -459,7 +454,7 @@ static int sun4i_ss_probe(struct platform_device *pdev) * this info could be useful */ - err = pm_runtime_get_sync(ss->dev); + err = pm_runtime_resume_and_get(ss->dev); if (err < 0) goto error_pm; diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c index c1b4585e9bbc..d28292762b32 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c @@ -27,7 +27,7 @@ int sun4i_hash_crainit(struct crypto_tfm *tfm) algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); op->ss = algt->ss; - err = pm_runtime_get_sync(op->ss->dev); + err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) return err; diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c index 443160a114bb..491fcb7b81b4 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c @@ -29,7 +29,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ss = algt->ss; - err = pm_runtime_get_sync(ss->dev); + err = pm_runtime_resume_and_get(ss->dev); if (err < 0) return err; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 33707a2e55ff..54ae8d16e493 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -240,11 
+240,14 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req theend_sgs: if (areq->src == areq->dst) { - dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); + dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), + DMA_BIDIRECTIONAL); } else { if (nr_sgs > 0) - dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); + dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst), + DMA_FROM_DEVICE); } theend_iv: diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index 158422ff5695..00194d1d9ae6 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -932,7 +932,7 @@ static int sun8i_ce_probe(struct platform_device *pdev) if (err) goto error_alg; - err = pm_runtime_get_sync(ce->dev); + err = pm_runtime_resume_and_get(ce->dev); if (err < 0) goto error_alg; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 2f09a37306e2..88194718a806 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -405,7 +405,8 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm)); dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c index cfde9ee4356b..cd1baee424a1 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c @@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); if (dma_mapping_error(ce->dev, dma_iv)) { dev_err(ce->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; goto err_iv; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index ed2a69f82e1c..9ef1c85c4aaa 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -232,10 +232,13 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) theend_sgs: if (areq->src == areq->dst) { - dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); + dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), + DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE); - dma_unmap_sg(ss->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); + dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); + dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst), + DMA_FROM_DEVICE); } theend_iv: @@ -351,7 +354,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) op->enginectx.op.prepare_request = NULL; op->enginectx.op.unprepare_request = NULL; - err = pm_runtime_get_sync(op->ss->dev); + err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) { dev_err(op->ss->dev, "pm error %d\n", err); goto error_pm; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 
e0ddc684798d..80e89066dbd1 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -753,7 +753,7 @@ static int sun8i_ss_probe(struct platform_device *pdev) if (err) goto error_alg; - err = pm_runtime_get_sync(ss->dev); + err = pm_runtime_resume_and_get(ss->dev); if (err < 0) goto error_alg; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 11cbcbc83a7b..3c073eb3db03 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -348,8 +348,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) bf = (__le32 *)pad; result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); - if (!result) + if (!result) { + kfree(pad); return -ENOMEM; + } for (i = 0; i < MAX_SG; i++) { rctx->t_dst[i].addr = 0; @@ -432,14 +434,14 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE); + dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE); - kfree(pad); - memcpy(areq->result, result, algt->alg.hash.halg.digestsize); - kfree(result); theend: + kfree(pad); + kfree(result); crypto_finalize_hash_request(engine, breq, err); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c index 08a1473b2145..3191527928e4 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c @@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); if (dma_mapping_error(ss->dev, dma_iv)) { dev_err(ss->dev, "Cannot DMA MAP IV\n"); - return -EFAULT; + err = -EFAULT; + goto err_free; } dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE); @@ -167,6 +168,7 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, memcpy(ctx->seed, d + dlen, ctx->slen); } memzero_explicit(d, todo); +err_free: kfree(d); return err; diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c index a3fa849b139a..ded732242732 100644 --- a/drivers/crypto/amcc/crypto4xx_alg.c +++ b/drivers/crypto/amcc/crypto4xx_alg.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. 
@@ -115,7 +115,7 @@ int crypto4xx_decrypt_iv_block(struct skcipher_request *req) return crypto4xx_crypt(req, AES_IV_SIZE, true, true); } -/** +/* * AES Functions */ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher, @@ -374,7 +374,7 @@ static int crypto4xx_aead_setup_fallback(struct crypto4xx_ctx *ctx, return crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen); } -/** +/* * AES-CCM Functions */ @@ -489,7 +489,7 @@ int crypto4xx_setauthsize_aead(struct crypto_aead *cipher, return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize); } -/** +/* * AES-GCM Functions */ @@ -617,7 +617,7 @@ int crypto4xx_decrypt_aes_gcm(struct aead_request *req) return crypto4xx_crypt_aes_gcm(req, true); } -/** +/* * HASH SHA1 Functions */ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm, @@ -711,7 +711,7 @@ int crypto4xx_hash_digest(struct ahash_request *req) ctx->sa_len, 0, NULL); } -/** +/* * SHA1 Algorithm */ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm) diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index 8d1b918a0533..8278d98074e9 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. @@ -44,7 +44,7 @@ #define PPC4XX_SEC_VERSION_STR "0.5" -/** +/* * PPC4xx Crypto Engine Initialization Routine */ static void crypto4xx_hw_init(struct crypto4xx_device *dev) @@ -159,7 +159,7 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx) ctx->sa_len = 0; } -/** +/* * alloc memory for the gather ring * no need to alloc buf for the ring * gdr_tail, gdr_head and gdr_count are initialized by this function @@ -268,7 +268,7 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx) return tail; } -/** +/* * alloc memory for the gather ring * no need to alloc buf for the ring * gdr_tail, gdr_head and gdr_count are initialized by this function @@ -346,7 +346,7 @@ static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev, return &dev->gdr[idx]; } -/** +/* * alloc memory for the scatter ring * need to alloc buf for the ring * sdr_tail, sdr_head and sdr_count are initialized by this function @@ -930,7 +930,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req, return is_busy ? -EBUSY : -EINPROGRESS; } -/** +/* * Algorithm Registration Functions */ static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg, @@ -1097,7 +1097,7 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data) } while (head != tail); } -/** +/* * Top Half of isr. */ static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data, @@ -1186,7 +1186,7 @@ static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed, return 0; } -/** +/* * Supported Crypto Algorithms */ static struct crypto4xx_alg_common crypto4xx_alg[] = { @@ -1369,7 +1369,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { } }, }; -/** +/* * Module Initialization Routine */ static int crypto4xx_probe(struct platform_device *ofdev) diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index a4e25b46cd0a..56c10668c0ab 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. 
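
[editor's note] For reference, the distinction driving this long run of comment-opener conversions: a comment opened with two asterisks is reserved for kernel-doc, which scripts/kernel-doc parses and warns about when the body does not follow the expected grammar; plain banner comments should open with a single asterisk. A sketch of a well-formed kernel-doc comment on a hypothetical helper:

```c
#include <linux/types.h>

/**
 * demo_sum - add two 32-bit values
 * @a: first addend
 * @b: second addend
 *
 * The name line, one @line per parameter, and an optional Return:
 * section are what the kernel-doc parser expects after a double-star
 * opener; the banners converted in these hunks had none of that.
 *
 * Return: the wrapping sum of @a and @b.
 */
static u32 demo_sum(u32 a, u32 b)
{
	return a + b;
}
```
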
@@ -188,7 +188,7 @@ int crypto4xx_hash_final(struct ahash_request *req); int crypto4xx_hash_update(struct ahash_request *req); int crypto4xx_hash_init(struct ahash_request *req); -/** +/* * Note: Only use this function to copy items that is word aligned. */ static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf, diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h index c4c0a1a75941..1038061224da 100644 --- a/drivers/crypto/amcc/crypto4xx_reg_def.h +++ b/drivers/crypto/amcc/crypto4xx_reg_def.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. @@ -104,7 +104,7 @@ #define CRYPTO4XX_PRNG_LFSR_L 0x00070030 #define CRYPTO4XX_PRNG_LFSR_H 0x00070034 -/** +/* * Initialize CRYPTO ENGINE registers, and memory bases. */ #define PPC4XX_PDR_POLL 0x3ff @@ -123,7 +123,7 @@ #define PPC4XX_INT_TIMEOUT_CNT 0 #define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF #define PPC4XX_INT_CFG 1 -/** +/* * all follow define are ad hoc */ #define PPC4XX_RING_RETRY 100 @@ -131,7 +131,7 @@ #define PPC4XX_SDR_SIZE PPC4XX_NUM_SD #define PPC4XX_GDR_SIZE PPC4XX_NUM_GD -/** +/* * Generic Security Association (SA) with all possible fields. These will * never likely used except for reference purpose. These structure format * can be not changed as the hardware expects them to be layout as defined. diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h index fe756abfc19f..e98e4e7abbad 100644 --- a/drivers/crypto/amcc/crypto4xx_sa.h +++ b/drivers/crypto/amcc/crypto4xx_sa.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. @@ -14,7 +14,7 @@ #define AES_IV_SIZE 16 -/** +/* * Contents of Dynamic Security Association (SA) with all possible fields */ union dynamic_sa_contents { @@ -122,7 +122,7 @@ union sa_command_0 { #define SA_AES_KEY_LEN_256 4 #define SA_REV2 1 -/** +/* * The follow defines bits sa_command_1 * In Basic hash mode this bit define simple hash or hmac. * In IPsec mode, this bit define muting control. 
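
[editor's note] The allwinner probe and init_tfm hunks earlier in this section swap pm_runtime_get_sync() for pm_runtime_resume_and_get(). The difference, sketched on a hypothetical device pointer: a failed get_sync still holds a usage-count reference that the caller must remember to drop.

```c
#include <linux/pm_runtime.h>

static int demo_power_on(struct device *dev)
{
	int err;

	/*
	 * pm_runtime_resume_and_get() drops its usage-count reference
	 * itself when resume fails, so no pm_runtime_put_noidle() is
	 * needed on the error path (the bug pattern get_sync invites).
	 */
	err = pm_runtime_resume_and_get(dev);
	if (err < 0)
		return err;

	/* ... use the device ... */

	pm_runtime_put(dev);
	return 0;
}
```
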
@@ -172,7 +172,7 @@ struct dynamic_sa_ctl { union sa_command_1 sa_command_1; } __attribute__((packed)); -/** +/* * State Record for Security Association (SA) */ struct sa_state_record { @@ -184,7 +184,7 @@ struct sa_state_record { }; } __attribute__((packed)); -/** +/* * Security Association (SA) for AES128 * */ @@ -213,7 +213,7 @@ struct dynamic_sa_aes192 { #define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4) #define SA_AES192_CONTENTS 0x3e000062 -/** +/* * Security Association (SA) for AES256 */ struct dynamic_sa_aes256 { @@ -228,7 +228,7 @@ struct dynamic_sa_aes256 { #define SA_AES256_CONTENTS 0x3e000082 #define SA_AES_CONTENTS 0x3e000002 -/** +/* * Security Association (SA) for AES128 CCM */ struct dynamic_sa_aes128_ccm { @@ -242,7 +242,7 @@ struct dynamic_sa_aes128_ccm { #define SA_AES128_CCM_CONTENTS 0x3e000042 #define SA_AES_CCM_CONTENTS 0x3e000002 -/** +/* * Security Association (SA) for AES128_GCM */ struct dynamic_sa_aes128_gcm { @@ -258,7 +258,7 @@ struct dynamic_sa_aes128_gcm { #define SA_AES128_GCM_CONTENTS 0x3e000442 #define SA_AES_GCM_CONTENTS 0x3e000402 -/** +/* * Security Association (SA) for HASH160: HMAC-SHA1 */ struct dynamic_sa_hash160 { diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h index 3af732f25c1c..7356716274cb 100644 --- a/drivers/crypto/amcc/crypto4xx_trng.h +++ b/drivers/crypto/amcc/crypto4xx_trng.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ -/** +/* * AMCC SoC PPC4xx Crypto Driver * * Copyright (c) 2008 Applied Micro Circuits Corporation. diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c index 8b5e07316352..c6865cbd334b 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c +++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c @@ -236,10 +236,10 @@ static int meson_cipher(struct skcipher_request *areq) dma_unmap_single(mc->dev, phykeyiv, keyivlen, DMA_TO_DEVICE); if (areq->src == areq->dst) { - dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); + dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(mc->dev, areq->src, nr_sgs, DMA_TO_DEVICE); - dma_unmap_sg(mc->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); + dma_unmap_sg(mc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + dma_unmap_sg(mc->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE); } if (areq->iv && ivsize > 0) { diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c index 5bbeff433c8c..6e7ae896717c 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev) struct meson_dev *mc; int err, i; - if (!pdev->dev.of_node) - return -ENODEV; - mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 9bd8e5167be3..333fbefbbccb 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -26,7 +26,7 @@ static struct atmel_ecc_driver_data driver_data; /** - * atmel_ecdh_ctx - transformation context + * struct atmel_ecdh_ctx - transformation context * @client : pointer to i2c client device * @fallback : used for unsupported curves or when user wants to use its own * private key. @@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data; * of the user to not call set_secret() while * generate_public_key() or compute_shared_secret() are in flight. 
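
[editor's note] The atmel-ecdh hunks in this stretch narrow the hardware path to NIST P-256 and route everything else through the software fallback named in @fallback above. A minimal sketch of that routing, under the assumption that a user-supplied key always goes to software; all demo_* names are hypothetical, and the decode helper is the one from crypto/ecdh.h:

```c
#include <crypto/kpp.h>
#include <crypto/ecdh.h>

struct demo_ctx {
	struct crypto_kpp *fallback;	/* e.g. the generic "ecdh-nist-p256" */
	bool do_fallback;
};

static int demo_set_secret(struct demo_ctx *ctx, const void *buf,
			   unsigned int len)
{
	struct ecdh params;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	if (params.key_size) {		/* caller supplied its own key */
		ctx->do_fallback = true;
		return crypto_kpp_set_secret(ctx->fallback, buf, len);
	}

	ctx->do_fallback = false;
	/* ... ask the device to generate and store a private key ... */
	return 0;
}
```
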
* @curve_id : elliptic curve id - * @n_sz : size in bytes of the n prime * @do_fallback: true when the device doesn't support the curve or when the user * wants to use its own private key. */ @@ -43,7 +42,6 @@ struct atmel_ecdh_ctx { struct crypto_kpp *fallback; const u8 *public_key; unsigned int curve_id; - size_t n_sz; bool do_fallback; }; @@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, int status) { struct kpp_request *req = areq; - struct atmel_ecdh_ctx *ctx = work_data->ctx; struct atmel_i2c_cmd *cmd = &work_data->cmd; size_t copied, n_sz; @@ -59,7 +56,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, goto free_work_data; /* might want less than we've got */ - n_sz = min_t(size_t, ctx->n_sz, req->dst_len); + n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len); /* copy the shared secret */ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz), @@ -73,14 +70,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, kpp_request_complete(req, status); } -static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) -{ - if (curve_id == ECC_CURVE_NIST_P256) - return ATMEL_ECC_NIST_P256_N_SIZE; - - return 0; -} - /* * A random private key is generated and stored in the device. The device * returns the pair public key. @@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, return -EINVAL; } - ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id); - if (!ctx->n_sz || params.key_size) { + if (params.key_size) { /* fallback to ecdh software implementation */ ctx->do_fallback = true; return crypto_kpp_set_secret(ctx->fallback, buf, len); @@ -125,7 +113,6 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, goto free_cmd; ctx->do_fallback = false; - ctx->curve_id = params.curve_id; atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2); @@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm) struct crypto_kpp *fallback; struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + ctx->curve_id = ECC_CURVE_NIST_P256; ctx->client = atmel_ecc_i2c_client_alloc(); if (IS_ERR(ctx->client)) { pr_err("tfm - i2c_client binding failed\n"); @@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm) return ATMEL_ECC_PUBKEY_SIZE; } -static struct kpp_alg atmel_ecdh = { +static struct kpp_alg atmel_ecdh_nist_p256 = { .set_secret = atmel_ecdh_set_secret, .generate_public_key = atmel_ecdh_generate_public_key, .compute_shared_secret = atmel_ecdh_compute_shared_secret, @@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = { .max_size = atmel_ecdh_max_size, .base = { .cra_flags = CRYPTO_ALG_NEED_FALLBACK, - .cra_name = "ecdh", + .cra_name = "ecdh-nist-p256", .cra_driver_name = "atmel-ecdh", .cra_priority = ATMEL_ECC_PRIORITY, .cra_module = THIS_MODULE, @@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client, &driver_data.i2c_client_list); spin_unlock(&driver_data.i2c_list_lock); - ret = crypto_register_kpp(&atmel_ecdh); + ret = crypto_register_kpp(&atmel_ecdh_nist_p256); if (ret) { spin_lock(&driver_data.i2c_list_lock); list_del(&i2c_priv->i2c_client_list_node); spin_unlock(&driver_data.i2c_list_lock); dev_err(&client->dev, "%s alg registration failed\n", - atmel_ecdh.base.cra_driver_name); + atmel_ecdh_nist_p256.base.cra_driver_name); } else { dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n"); } @@ -365,7 +353,7 @@ static int 
atmel_ecc_remove(struct i2c_client *client) return -EBUSY; } - crypto_unregister_kpp(&atmel_ecdh); + crypto_unregister_kpp(&atmel_ecdh_nist_p256); spin_lock(&driver_data.i2c_list_lock); list_del(&i2c_priv->i2c_client_list_node); diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index e8e8281e027d..6fd3e969211d 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -339,7 +339,7 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) } if (bus_clk_rate > 1000000L) { - dev_err(dev, "%d exceeds maximum supported clock frequency (1MHz)\n", + dev_err(dev, "%u exceeds maximum supported clock frequency (1MHz)\n", bus_clk_rate); return -EINVAL; } diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 352d80cb5ae9..1b13f601fd95 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -434,7 +434,7 @@ static int atmel_sha_init(struct ahash_request *req) ctx->flags = 0; - dev_dbg(dd->dev, "init: digest size: %d\n", + dev_dbg(dd->dev, "init: digest size: %u\n", crypto_ahash_digestsize(tfm)); switch (crypto_ahash_digestsize(tfm)) { @@ -1102,7 +1102,7 @@ static int atmel_sha_start(struct atmel_sha_dev *dd) struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); int err; - dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", + dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n", ctx->op, req->nbytes); err = atmel_sha_hw_init(dd); diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 4d63cb13a54f..6f01c51e3c37 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -1217,7 +1217,6 @@ static int atmel_tdes_probe(struct platform_device *pdev) tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res); if (IS_ERR(tdes_dd->io_base)) { - dev_err(dev, "can't ioremap\n"); err = PTR_ERR(tdes_dd->io_base); goto err_tasklet_kill; } diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 851b149f7170..053315e260c2 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -1019,6 +1019,7 @@ static void handle_ahash_resp(struct iproc_reqctx_s *rctx) * a SPU response message for an AEAD request. Includes buffers to catch SPU * message headers and the response data. * @mssg: mailbox message containing the receive sg + * @req: Crypto API request * @rctx: crypto request context * @rx_frag_num: number of scatterlist elements required to hold the * SPU response message @@ -2952,9 +2953,9 @@ static int aead_gcm_esp_setkey(struct crypto_aead *cipher, /** * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC. - * cipher: AEAD structure - * key: Key followed by 4 bytes of salt - * keylen: Length of key plus salt, in bytes + * @cipher: AEAD structure + * @key: Key followed by 4 bytes of salt + * @keylen: Length of key plus salt, in bytes * * Extracts salt from key and stores it to be prepended to IV on each request. 
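
[editor's note] The atmel-i2c and atmel-sha hunks above are printk-format hygiene: bus_clk_rate and crypto_ahash_digestsize() are unsigned, so %u is the matching specifier and -Wformat stays quiet. A compile-checkable sketch with a hypothetical rate parameter:

```c
#include <linux/device.h>

static void demo_report(struct device *dev, unsigned int rate_hz)
{
	/* %u for unsigned int; %d would be a signed/unsigned mismatch. */
	dev_dbg(dev, "clock rate: %u Hz\n", rate_hz);
}
```
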
* Digest is always 16 bytes diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c index 007abf92cc05..6283e8c6d51d 100644 --- a/drivers/crypto/bcm/spu.c +++ b/drivers/crypto/bcm/spu.c @@ -457,7 +457,7 @@ u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode, * @cipher_mode: Algo type * @data_size: Length of plaintext (bytes) * - * @Return: Length of padding, in bytes + * Return: Length of padding, in bytes */ u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode, unsigned int data_size) @@ -510,10 +510,10 @@ u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode, } /** - * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included + * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included * in a SPU request after the AAD and before the payload. * @cipher_mode: cipher mode - * @iv_ctr_len: initialization vector length in bytes + * @iv_len: initialization vector length in bytes * * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need * to include the IV as a separate field in the SPU request msg. @@ -543,9 +543,9 @@ enum hash_type spum_hash_type(u32 src_sent) /** * spum_digest_size() - Determine the size of a hash digest to expect the SPU to * return. - * alg_digest_size: Number of bytes in the final digest for the given algo - * alg: The hash algorithm - * htype: Type of hash operation (init, update, full, etc) + * @alg_digest_size: Number of bytes in the final digest for the given algo + * @alg: The hash algorithm + * @htype: Type of hash operation (init, update, full, etc) * * When doing incremental hashing for an algorithm with a truncated hash * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as @@ -580,7 +580,7 @@ u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg, * @aead_parms: Parameters related to AEAD operation * @data_size: Length of data to be encrypted or authenticated. If AEAD, does * not include length of AAD. - + * * Return: the length of the SPU header in bytes. 0 if an error occurs. */ u32 spum_create_request(u8 *spu_hdr, @@ -911,7 +911,7 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) * setkey() time in spu_cipher_req_init(). * @spu_hdr: Start of the request message header (MH field) * @spu_req_hdr_len: Length in bytes of the SPU request header - * @isInbound: 0 encrypt, 1 decrypt + * @is_inbound: 0 encrypt, 1 decrypt * @cipher_parms: Parameters describing cipher operation to be performed * @data_size: Length of the data in the BD field * diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index 2db35b5ccaa2..07989bb8c220 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -543,7 +543,8 @@ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len) /** * spu2_fmd_init() - At setkey time, initialize the fixed meta data for * subsequent skcipher requests for this context. - * @spu2_cipher_type: Cipher algorithm + * @fmd: Start of FMD field to be written + * @spu2_type: Cipher algorithm * @spu2_mode: Cipher mode * @cipher_key_len: Length of cipher key, in bytes * @cipher_iv_len: Length of cipher initialization vector, in bytes @@ -598,7 +599,7 @@ static int spu2_fmd_init(struct SPU2_FMD *fmd, * SPU request packet. * @fmd: Start of FMD field to be written * @is_inbound: true if decrypting. false if encrypting. 
- * @authFirst: true if alg authenticates before encrypting + * @auth_first: true if alg authenticates before encrypting * @protocol: protocol selector * @cipher_type: cipher algorithm * @cipher_mode: cipher mode @@ -640,6 +641,7 @@ static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd, * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of * SPU request packet. * @fmd: Start of FMD field to be written + * @is_inbound: true if decrypting. false if encrypting. * @assoc_size: Length of additional associated data, in bytes * @auth_key_len: Length of authentication key, in bytes * @cipher_key_len: Length of cipher key, in bytes @@ -793,7 +795,7 @@ u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg, } /** - * spu_payload_length() - Given a SPU2 message header, extract the payload + * spu2_payload_length() - Given a SPU2 message header, extract the payload * length. * @spu_hdr: Start of SPU message header (FMD) * @@ -812,10 +814,11 @@ u32 spu2_payload_length(u8 *spu_hdr) } /** - * spu_response_hdr_len() - Determine the expected length of a SPU response + * spu2_response_hdr_len() - Determine the expected length of a SPU response * header. * @auth_key_len: Length of authentication key, in bytes * @enc_key_len: Length of encryption key, in bytes + * @is_hash: Unused * * For SPU2, includes just FMD. OMD is never requested. * @@ -827,7 +830,7 @@ u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash) } /** - * spu_hash_pad_len() - Calculate the length of hash padding required to extend + * spu2_hash_pad_len() - Calculate the length of hash padding required to extend * data to a full block size. * @hash_alg: hash algorithm * @hash_mode: hash mode @@ -845,8 +848,10 @@ u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode, } /** - * spu2_gcm_ccm_padlen() - Determine the length of GCM/CCM padding for either + * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either * the AAD field or the data. + * @cipher_mode: Unused + * @data_size: Unused * * Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required. */ @@ -857,7 +862,7 @@ u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode, } /** - * spu_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch + * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch * associated data in a SPU2 output packet. * @cipher_mode: cipher mode * @assoc_len: length of additional associated data, in bytes @@ -878,11 +883,11 @@ u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode, return resp_len; } -/* - * spu_aead_ivlen() - Calculate the length of the AEAD IV to be included +/** + * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included * in a SPU request after the AAD and before the payload. * @cipher_mode: cipher mode - * @iv_ctr_len: initialization vector length in bytes + * @iv_len: initialization vector length in bytes * * For SPU2, AEAD IV is included in OMD and does not need to be repeated * prior to the payload. @@ -909,9 +914,9 @@ enum hash_type spu2_hash_type(u32 src_sent) /** * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to * return. 
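
[editor's note] Jumping ahead briefly: the ccp-crypto-main and geode-aes hunks later in this section replace a bare static spinlock_t plus a runtime spin_lock_init() with DEFINE_SPINLOCK(). A sketch of the build-time form, which also removes the window where the lock exists but is not yet initialized; demo_* names are hypothetical:

```c
#include <linux/spinlock.h>

/* Initialized at build time; usable even before any init function runs. */
static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_count;

static void demo_inc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	spin_unlock_irqrestore(&demo_lock, flags);
}
```
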
- * alg_digest_size: Number of bytes in the final digest for the given algo - * alg: The hash algorithm - * htype: Type of hash operation (init, update, full, etc) + * @alg_digest_size: Number of bytes in the final digest for the given algo + * @alg: The hash algorithm + * @htype: Type of hash operation (init, update, full, etc) * */ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg, @@ -921,7 +926,7 @@ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg, } /** - * spu_create_request() - Build a SPU2 request message header, includint FMD and + * spu2_create_request() - Build a SPU2 request message header, includint FMD and * OMD. * @spu_hdr: Start of buffer where SPU request header is to be written * @req_opts: SPU request message options @@ -1105,7 +1110,7 @@ u32 spu2_create_request(u8 *spu_hdr, } /** - * spu_cipher_req_init() - Build an skcipher SPU2 request message header, + * spu2_cipher_req_init() - Build an skcipher SPU2 request message header, * including FMD and OMD. * @spu_hdr: Location of start of SPU request (FMD field) * @cipher_parms: Parameters describing cipher request @@ -1162,11 +1167,11 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) } /** - * spu_cipher_req_finish() - Finish building a SPU request message header for a + * spu2_cipher_req_finish() - Finish building a SPU request message header for a * block cipher request. * @spu_hdr: Start of the request message header (MH field) * @spu_req_hdr_len: Length in bytes of the SPU request header - * @isInbound: 0 encrypt, 1 decrypt + * @is_inbound: 0 encrypt, 1 decrypt * @cipher_parms: Parameters describing cipher operation to be performed * @data_size: Length of the data in the BD field * @@ -1222,7 +1227,7 @@ void spu2_cipher_req_finish(u8 *spu_hdr, } /** - * spu_request_pad() - Create pad bytes at the end of the data. + * spu2_request_pad() - Create pad bytes at the end of the data. * @pad_start: Start of buffer where pad bytes are to be written * @gcm_padding: Length of GCM padding, in bytes * @hash_pad_len: Number of bytes of padding extend data to full block @@ -1311,7 +1316,7 @@ u8 spu2_rx_status_len(void) } /** - * spu_status_process() - Process the status from a SPU response message. + * spu2_status_process() - Process the status from a SPU response message. * @statp: start of STATUS word * * Return: 0 - if status is good and response should be processed diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c index c4669a96eaec..d5d9cabea55a 100644 --- a/drivers/crypto/bcm/util.c +++ b/drivers/crypto/bcm/util.c @@ -119,8 +119,8 @@ int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes) * @from_skip: number of bytes to skip in from_sg. Non-zero when previous * request included part of the buffer in entry in from_sg. * Assumes from_skip < from_sg->length. - * @from_nents number of entries in from_sg - * @length number of bytes to copy. may reach this limit before exhausting + * @from_nents: number of entries in from_sg + * @length: number of bytes to copy. may reach this limit before exhausting * from_sg. * * Copies the entries themselves, not the data in the entries. 
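
[editor's note] The nitrox hunks just below, like the allwinner and amlogic ones earlier, enforce the DMA-API pairing rule: dma_unmap_sg() takes the nents value originally passed to dma_map_sg(), not the possibly smaller mapped count it returned. A minimal sketch, assuming dev and sg come from the caller:

```c
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int demo_dma(struct device *dev, struct scatterlist *sg)
{
	int nr_sgs;

	nr_sgs = dma_map_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);
	if (nr_sgs <= 0)
		return -EIO;

	/* ... hand nr_sgs mapped entries to the engine ... */

	/* Unmap with the original entry count, not the mapped count. */
	dma_unmap_sg(dev, sg, sg_nents(sg), DMA_TO_DEVICE);
	return 0;
}
```
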
Assumes to_sg has diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index a780e627838a..8b8ed77d8715 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -71,6 +71,9 @@ struct caam_skcipher_alg { * @adata: authentication algorithm details * @cdata: encryption algorithm details * @authsize: authentication tag (a.k.a. ICV / MAC) size + * @xts_key_fallback: true if fallback tfm needs to be used due + * to unsupported xts key lengths + * @fallback: xts fallback tfm */ struct caam_ctx { struct caam_flc flc[NUM_OP]; diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index dd5f101e43f8..e313233ec6de 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -187,7 +187,8 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, } /** - * Count leading zeros, need it to strip, from a given scatterlist + * caam_rsa_count_leading_zeros - Count leading zeros, need it to strip, + * from a given scatterlist * * @sgl : scatterlist to count zeros from * @nbytes: number of zeros, in bytes, to strip diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 711b1acdd4e0..06ee42e8a245 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -10,7 +10,6 @@ #include #include #include -#include #include "cptpf.h" diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c index 99b053094f5a..c288c4b51783 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_isr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c @@ -10,7 +10,7 @@ #include "nitrox_isr.h" #include "nitrox_mbx.h" -/** +/* * One vector for each type of ring * - NPS packet ring, AQMQ ring and ZQMQ ring */ @@ -216,7 +216,7 @@ static void nps_core_int_tasklet(unsigned long data) } } -/** +/* * nps_core_int_isr - interrupt handler for NITROX errors and * mailbox communication */ diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index 53ef06792133..df95ba26b414 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -58,14 +58,15 @@ static void softreq_unmap_sgbufs(struct nitrox_softreq *sr) struct device *dev = DEV(ndev); - dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg), + DMA_BIDIRECTIONAL); dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len, DMA_TO_DEVICE); kfree(sr->in.sgcomp); sr->in.sg = NULL; sr->in.sgmap_cnt = 0; - dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt, + dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg), DMA_BIDIRECTIONAL); dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len, DMA_TO_DEVICE); @@ -178,7 +179,7 @@ static int dma_map_inbufs(struct nitrox_softreq *sr, return 0; incomp_err: - dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); sr->in.sgmap_cnt = 0; return ret; } @@ -203,7 +204,7 @@ static int dma_map_outbufs(struct nitrox_softreq *sr, return 0; outcomp_map_err: - dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL); sr->out.sgmap_cnt = 0; sr->out.sg = NULL; return ret; diff --git a/drivers/crypto/cavium/zip/common.h b/drivers/crypto/cavium/zip/common.h index 58fb3ed6e644..54f6fb054119 100644 --- a/drivers/crypto/cavium/zip/common.h +++ 
b/drivers/crypto/cavium/zip/common.h @@ -56,7 +56,6 @@ #include #include #include -#include /* Device specific zlib function definitions */ #include "zip_device.h" diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 88275b4867ea..5976530c00a8 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -59,7 +59,7 @@ struct ccp_crypto_queue { #define CCP_CRYPTO_MAX_QLEN 100 static struct ccp_crypto_queue req_queue; -static spinlock_t req_queue_lock; +static DEFINE_SPINLOCK(req_queue_lock); struct ccp_crypto_cmd { struct list_head entry; @@ -410,7 +410,6 @@ static int ccp_crypto_init(void) return ret; } - spin_lock_init(&req_queue_lock); INIT_LIST_HEAD(&req_queue.cmds); req_queue.backlog = &req_queue.cmds; req_queue.cmd_count = 0; diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 0971ee60f840..6777582aa1ce 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c @@ -548,7 +548,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp) return ccp->cmd_q_count == suspended; } -int ccp_dev_suspend(struct sp_device *sp) +void ccp_dev_suspend(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; @@ -556,7 +556,7 @@ int ccp_dev_suspend(struct sp_device *sp) /* If there's no device there's nothing to do */ if (!ccp) - return 0; + return; spin_lock_irqsave(&ccp->cmd_lock, flags); @@ -572,11 +572,9 @@ int ccp_dev_suspend(struct sp_device *sp) while (!ccp_queues_suspended(ccp)) wait_event_interruptible(ccp->suspend_queue, ccp_queues_suspended(ccp)); - - return 0; } -int ccp_dev_resume(struct sp_device *sp) +void ccp_dev_resume(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; @@ -584,7 +582,7 @@ int ccp_dev_resume(struct sp_device *sp) /* If there's no device there's nothing to do */ if (!ccp) - return 0; + return; spin_lock_irqsave(&ccp->cmd_lock, flags); @@ -597,8 +595,6 @@ int ccp_dev_resume(struct sp_device *sp) } spin_unlock_irqrestore(&ccp->cmd_lock, flags); - - return 0; } int ccp_dev_init(struct sp_device *sp) diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index d6a8f4e4b14a..bb88198c874e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2418,7 +2418,6 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) dst.address += CCP_ECC_OUTPUT_SIZE; ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0, CCP_ECC_MODULUS_BYTES); - dst.address += CCP_ECC_OUTPUT_SIZE; /* Restore the workarea address */ dst.address = save; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index cb9b4c4e371e..da3872c48308 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -21,6 +21,7 @@ #include #include #include +#include #include @@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp) struct sev_device *sev; int ret = -ENOMEM; + if (!boot_cpu_has(X86_FEATURE_SEV)) { + dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); + return 0; + } + sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL); if (!sev) goto e_err; diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c index 6284a15e5047..7eb3e4668286 100644 --- a/drivers/crypto/ccp/sp-dev.c +++ b/drivers/crypto/ccp/sp-dev.c @@ -213,12 +213,8 @@ void sp_destroy(struct sp_device *sp) int sp_suspend(struct sp_device *sp) { - int ret; - if (sp->dev_vdata->ccp_vdata) { - ret = ccp_dev_suspend(sp); - if (ret) - return ret; + 
ccp_dev_suspend(sp); } return 0; @@ -226,12 +222,8 @@ int sp_suspend(struct sp_device *sp) int sp_resume(struct sp_device *sp) { - int ret; - if (sp->dev_vdata->ccp_vdata) { - ret = ccp_dev_resume(sp); - if (ret) - return ret; + ccp_dev_resume(sp); } return 0; diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 0218d0670eee..20377e67f65d 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -134,8 +134,8 @@ struct sp_device *sp_get_psp_master_device(void); int ccp_dev_init(struct sp_device *sp); void ccp_dev_destroy(struct sp_device *sp); -int ccp_dev_suspend(struct sp_device *sp); -int ccp_dev_resume(struct sp_device *sp); +void ccp_dev_suspend(struct sp_device *sp); +void ccp_dev_resume(struct sp_device *sp); #else /* !CONFIG_CRYPTO_DEV_SP_CCP */ @@ -144,15 +144,8 @@ static inline int ccp_dev_init(struct sp_device *sp) return 0; } static inline void ccp_dev_destroy(struct sp_device *sp) { } - -static inline int ccp_dev_suspend(struct sp_device *sp) -{ - return 0; -} -static inline int ccp_dev_resume(struct sp_device *sp) -{ - return 0; -} +static inline void ccp_dev_suspend(struct sp_device *sp) { } +static inline void ccp_dev_resume(struct sp_device *sp) { } #endif /* CONFIG_CRYPTO_DEV_SP_CCP */ #ifdef CONFIG_CRYPTO_DEV_SP_PSP diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index f471dbaef1fb..f468594ef8af 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -356,6 +356,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] }, { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] }, + { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c index 5e697a90ea7f..5c9d47f3be37 100644 --- a/drivers/crypto/ccp/tee-dev.c +++ b/drivers/crypto/ccp/tee-dev.c @@ -5,7 +5,7 @@ * Author: Rijo Thomas * Author: Devaraj Rangasamy * - * Copyright 2019 Advanced Micro Devices, Inc. + * Copyright (C) 2019,2021 Advanced Micro Devices, Inc. */ #include @@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size) if (!start_addr) return -ENOMEM; + memset(start_addr, 0x0, ring_size); rb_mgr->ring_start = start_addr; rb_mgr->ring_size = ring_size; rb_mgr->ring_pa = __psp_pa(start_addr); @@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id, void *buf, size_t len, struct tee_ring_cmd **resp) { struct tee_ring_cmd *cmd; - u32 rptr, wptr; int nloop = 1000, ret = 0; + u32 rptr; *resp = NULL; mutex_lock(&tee->rb_mgr.mutex); - wptr = tee->rb_mgr.wptr; - - /* Check if ring buffer is full */ + /* Loop until empty entry found in ring buffer */ do { + /* Get pointer to ring buffer command entry */ + cmd = (struct tee_ring_cmd *) + (tee->rb_mgr.ring_start + tee->rb_mgr.wptr); + rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg); - if (!(wptr + sizeof(struct tee_ring_cmd) == rptr)) + /* Check if ring buffer is full or command entry is waiting + * for response from TEE + */ + if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr || + cmd->flag == CMD_WAITING_FOR_RESPONSE)) break; - dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n", - rptr, wptr); + dev_dbg(tee->dev, "tee: ring buffer full. 
rptr = %u wptr = %u\n", + rptr, tee->rb_mgr.wptr); - /* Wait if ring buffer is full */ + /* Wait if ring buffer is full or TEE is processing data */ mutex_unlock(&tee->rb_mgr.mutex); schedule_timeout_interruptible(msecs_to_jiffies(10)); mutex_lock(&tee->rb_mgr.mutex); } while (--nloop); - if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) { - dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n", - rptr, wptr); + if (!nloop && + (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr || + cmd->flag == CMD_WAITING_FOR_RESPONSE)) { + dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n", + rptr, tee->rb_mgr.wptr, cmd->flag); ret = -EBUSY; goto unlock; } - /* Pointer to empty data entry in ring buffer */ - cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr); + /* Do not submit command if PSP got disabled while processing any + * command in another thread + */ + if (psp_dead) { + ret = -EBUSY; + goto unlock; + } /* Write command data into ring buffer */ cmd->cmd_id = cmd_id; @@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id, memset(&cmd->buf[0], 0, sizeof(cmd->buf)); memcpy(&cmd->buf[0], buf, len); + /* Indicate driver is waiting for response */ + cmd->flag = CMD_WAITING_FOR_RESPONSE; + /* Update local copy of write pointer */ tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd); if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size) @@ -309,14 +326,14 @@ static int tee_wait_cmd_completion(struct psp_tee_device *tee, struct tee_ring_cmd *resp, unsigned int timeout) { - /* ~5ms sleep per loop => nloop = timeout * 200 */ - int nloop = timeout * 200; + /* ~1ms sleep per loop => nloop = timeout * 1000 */ + int nloop = timeout * 1000; while (--nloop) { if (resp->cmd_state == TEE_CMD_STATE_COMPLETED) return 0; - usleep_range(5000, 5100); + usleep_range(1000, 1100); } dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n", @@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len, return ret; ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT); - if (ret) + if (ret) { + resp->flag = CMD_RESPONSE_TIMEDOUT; return ret; + } memcpy(buf, &resp->buf[0], len); *status = resp->status; + resp->flag = CMD_RESPONSE_COPIED; + return 0; } EXPORT_SYMBOL(psp_tee_process_cmd); diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h index f09960112115..49d26158b71e 100644 --- a/drivers/crypto/ccp/tee-dev.h +++ b/drivers/crypto/ccp/tee-dev.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright 2019 Advanced Micro Devices, Inc. + * Copyright (C) 2019,2021 Advanced Micro Devices, Inc. 
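
[editor's note] The tee_wait_cmd_completion() change above keeps the same overall budget while sleeping ~1 ms per iteration instead of ~5 ms, so a timeout in seconds maps to timeout * 1000 loops. The shape of that bounded poll, sketched over a hypothetical status word:

```c
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_wait(const u32 *state, u32 done_val, unsigned int timeout_s)
{
	int nloop = timeout_s * 1000;	/* ~1 ms sleep per loop */

	while (--nloop) {
		if (READ_ONCE(*state) == done_val)
			return 0;
		usleep_range(1000, 1100);
	}
	return -ETIMEDOUT;
}
```
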
* * Author: Rijo Thomas * Author: Devaraj Rangasamy @@ -18,7 +18,7 @@ #include #define TEE_DEFAULT_TIMEOUT 10 -#define MAX_BUFFER_SIZE 992 +#define MAX_BUFFER_SIZE 988 /** * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration @@ -81,6 +81,20 @@ enum tee_cmd_state { TEE_CMD_STATE_COMPLETED, }; +/** + * enum cmd_resp_state - TEE command's response status maintained by driver + * @CMD_RESPONSE_INVALID: initial state when no command is written to ring + * @CMD_WAITING_FOR_RESPONSE: driver waiting for response from TEE + * @CMD_RESPONSE_TIMEDOUT: failed to get response from TEE + * @CMD_RESPONSE_COPIED: driver has copied response from TEE + */ +enum cmd_resp_state { + CMD_RESPONSE_INVALID, + CMD_WAITING_FOR_RESPONSE, + CMD_RESPONSE_TIMEDOUT, + CMD_RESPONSE_COPIED, +}; + /** * struct tee_ring_cmd - Structure of the command buffer in TEE ring * @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer @@ -91,6 +105,7 @@ enum tee_cmd_state { * @pdata: private data (currently unused) * @res1: reserved region * @buf: TEE command specific buffer + * @flag: refers to &enum cmd_resp_state */ struct tee_ring_cmd { u32 cmd_id; @@ -100,6 +115,7 @@ struct tee_ring_cmd { u64 pdata; u32 res1[2]; u8 buf[MAX_BUFFER_SIZE]; + u32 flag; /* Total size: 1024 bytes */ } __packed; diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index d0e59e942568..e599ac6dc162 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -352,10 +352,8 @@ static int init_cc_resources(struct platform_device *plat_dev) req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0); /* Map registers space */ new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs); - if (IS_ERR(new_drvdata->cc_base)) { - dev_err(dev, "Failed to ioremap registers"); + if (IS_ERR(new_drvdata->cc_base)) return PTR_ERR(new_drvdata->cc_base); - } dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name, req_mem_cc_regs); diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index f5a336634daa..6933546f87b1 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -126,11 +126,6 @@ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) return container_of(ctx->dev, struct uld_ctx, dev); } -static inline int is_ofld_imm(const struct sk_buff *skb) -{ - return (skb->len <= SGE_MAX_WR_LEN); -} - static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx) { memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr)); @@ -769,13 +764,14 @@ static inline void create_wreq(struct chcr_context *ctx, struct uld_ctx *u_ctx = ULD_CTX(ctx); unsigned int tx_channel_id, rx_channel_id; unsigned int txqidx = 0, rxqidx = 0; - unsigned int qid, fid; + unsigned int qid, fid, portno; get_qidxs(req, &txqidx, &rxqidx); qid = u_ctx->lldi.rxq_ids[rxqidx]; fid = u_ctx->lldi.rxq_ids[0]; + portno = rxqidx / ctx->rxq_perchan; tx_channel_id = txqidx / ctx->txq_perchan; - rx_channel_id = rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]); chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; @@ -797,15 +793,13 @@ static inline void create_wreq(struct chcr_context *ctx, /** * create_cipher_wr - form the WR for cipher operations - * @req: cipher req. - * @ctx: crypto driver context of the request. - * @qid: ingress qid where response of this WR should be received. 
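
[editor's note] The tee-dev.h hunks above shrink MAX_BUFFER_SIZE from 992 to 988 so that the new 4-byte flag keeps struct tee_ring_cmd at exactly 1024 bytes per ring slot. A sketch of that arithmetic with a build-time check; the hdr[4] grouping stands in for the individual leading u32 fields and is illustrative only:

```c
#include <linux/build_bug.h>
#include <linux/types.h>

#define DEMO_BUFFER_SIZE 988	/* was 992 before the flag was added */

struct demo_ring_cmd {
	u32 hdr[4];		/* cmd_id, state, status, reserved */
	u64 pdata;
	u32 res1[2];
	u8  buf[DEMO_BUFFER_SIZE];
	u32 flag;		/* new: the 4 bytes reclaimed from buf */
} __packed;

/* 16 + 8 + 8 + 988 + 4 == 1024: one command per 1 KiB ring slot. */
static_assert(sizeof(struct demo_ring_cmd) == 1024);
```
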
- * @op_type: encryption or decryption + * @wrparam: Container for create_cipher_wr()'s parameters */ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); struct chcr_context *ctx = c_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; @@ -822,6 +816,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, reqctx->dst_ofst); dst_size = get_space_for_phys_dsgl(nents); @@ -1559,7 +1554,8 @@ static inline void chcr_free_shash(struct crypto_shash *base_hash) /** * create_hash_wr - Create hash work request - * @req - Cipher req base + * @req: Cipher req base + * @param: Container for create_hash_wr()'s parameters */ static struct sk_buff *create_hash_wr(struct ahash_request *req, struct hash_wr_param *param) @@ -1580,6 +1576,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req, int error = 0; unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len); req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len + param->sg_len) <= SGE_MAX_WR_LEN; @@ -2438,6 +2435,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); @@ -2457,6 +2455,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req, struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); if (req->cryptlen == 0) return NULL; @@ -2710,9 +2709,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req, struct dsgl_walk dsgl_walk; unsigned int authsize = crypto_aead_authsize(tfm); struct chcr_context *ctx = a_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); u32 temp; unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); dsgl_walk_init(&dsgl_walk, phys_cpl); dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma); temp = req->assoclen + req->cryptlen + @@ -2752,9 +2753,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req, struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); struct chcr_context *ctx = c_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); struct dsgl_walk dsgl_walk; unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); dsgl_walk_init(&dsgl_walk, phys_cpl); dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, reqctx->dst_ofst); @@ -2958,6 +2961,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); 
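
[editor's note] The ccree hunk above drops a redundant message: devm_ioremap_resource() already logs a descriptive error on failure, so callers only need to propagate PTR_ERR(). The resulting idiom, sketched for a hypothetical platform driver:

```c
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_map_regs(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);	/* helper already printed the reason */

	return 0;
}
```
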
struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; @@ -2967,6 +2971,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, unsigned int tag_offset = 0, auth_offset = 0; unsigned int assoclen; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); + if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) assoclen = req->assoclen - 8; else @@ -3127,6 +3133,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); struct sk_buff *skb = NULL; @@ -3143,6 +3150,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req, struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; + rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) assoclen = req->assoclen - 8; diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c index f91f9d762a45..39c70e6255f9 100644 --- a/drivers/crypto/chelsio/chcr_core.c +++ b/drivers/crypto/chelsio/chcr_core.c @@ -1,4 +1,4 @@ -/** +/* * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. * * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved. @@ -184,7 +184,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld) struct uld_ctx *u_ctx; /* Create the device and add it in the device list */ - pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION); + pr_info_once("%s\n", DRV_DESC); if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) return ERR_PTR(-EOPNOTSUPP); @@ -309,4 +309,3 @@ module_exit(chcr_crypto_exit); MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards."); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Chelsio Communications"); -MODULE_VERSION(DRV_VERSION); diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index b02f981e7c32..f7c8bb95a71b 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h @@ -44,7 +44,6 @@ #include "cxgb4_uld.h" #define DRV_MODULE_NAME "chcr" -#define DRV_VERSION "1.0.0.0-ko" #define DRV_DESC "Chelsio T6 Crypto Co-processor Driver" #define MAX_PENDING_REQ_TO_HW 20 diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c index 4ee010f39912..fa5a9f207bc9 100644 --- a/drivers/crypto/geode-aes.c +++ b/drivers/crypto/geode-aes.c @@ -21,7 +21,7 @@ /* Static structures */ static void __iomem *_iobase; -static spinlock_t lock; +static DEFINE_SPINLOCK(lock); /* Write a 128 bit field (either a writable key or IV) */ static inline void @@ -383,8 +383,6 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) goto erequest; } - spin_lock_init(&lock); - /* Clear any pending activity */ iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG); diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 843192666dc3..e572f9982d4e 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -68,6 +68,8 @@ config CRYPTO_DEV_HISI_HPRE select CRYPTO_DEV_HISI_QM select CRYPTO_DH select CRYPTO_RSA + select CRYPTO_CURVE25519 + select CRYPTO_ECDH help Support for HiSilicon HPRE(High Performance RSA 
Engine) accelerator, which can accelerate RSA and DH algorithms. diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 181c109b19f7..e0b4a1982ee9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -10,6 +10,14 @@ #define HPRE_PF_DEF_Q_NUM 64 #define HPRE_PF_DEF_Q_BASE 0 +/* + * type used in qm sqc DW6. + * 0 - Algorithm which has been supported in V2, like RSA, DH and so on; + * 1 - ECC algorithm in V3. + */ +#define HPRE_V2_ALG_TYPE 0 +#define HPRE_V3_ECC_ALG_TYPE 1 + enum { HPRE_CLUSTER0, HPRE_CLUSTER1, @@ -18,7 +26,6 @@ enum { }; enum hpre_ctrl_dbgfs_file { - HPRE_CURRENT_QM, HPRE_CLEAR_ENABLE, HPRE_CLUSTER_CTRL, HPRE_DEBUG_FILE_NUM, @@ -75,6 +82,9 @@ enum hpre_alg_type { HPRE_ALG_KG_CRT = 0x3, HPRE_ALG_DH_G2 = 0x4, HPRE_ALG_DH = 0x5, + HPRE_ALG_ECC_MUL = 0xD, + /* shared by x25519 and x448, but x448 is not supported now */ + HPRE_ALG_CURVE25519_MUL = 0x10, }; struct hpre_sqe { @@ -92,8 +102,8 @@ struct hpre_sqe { __le32 rsvd1[_HPRE_SQE_ALIGN_EXT]; }; -struct hisi_qp *hpre_create_qp(void); -int hpre_algs_register(void); -void hpre_algs_unregister(void); +struct hisi_qp *hpre_create_qp(u8 type); +int hpre_algs_register(struct hisi_qm *qm); +void hpre_algs_unregister(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index a87f9904087a..a380087c83f7 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -1,7 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include +#include #include +#include +#include #include #include #include @@ -36,6 +39,13 @@ struct hpre_ctx; #define HPRE_DFX_SEC_TO_US 1000000 #define HPRE_DFX_US_TO_NS 1000 +/* size in bytes of the n prime */ +#define HPRE_ECC_NIST_P192_N_SIZE 24 +#define HPRE_ECC_NIST_P256_N_SIZE 32 + +/* size in bytes */ +#define HPRE_ECC_HW256_KSZ_B 32 + typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { @@ -61,14 +71,35 @@ struct hpre_dh_ctx { * else if base if the counterpart public key we * compute the shared secret * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1] + * low address: d--->n, please refer to Hisilicon HPRE UM */ - char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */ + char *xa_p; dma_addr_t dma_xa_p; char *g; /* m */ dma_addr_t dma_g; }; +struct hpre_ecdh_ctx { + /* low address: p->a->k->b */ + unsigned char *p; + dma_addr_t dma_p; + + /* low address: x->y */ + unsigned char *g; + dma_addr_t dma_g; +}; + +struct hpre_curve25519_ctx { + /* low address: p->a->k */ + unsigned char *p; + dma_addr_t dma_p; + + /* gx coordinate */ + unsigned char *g; + dma_addr_t dma_g; +}; + struct hpre_ctx { struct hisi_qp *qp; struct hpre_asym_request **req_list; @@ -80,7 +111,11 @@ struct hpre_ctx { union { struct hpre_rsa_ctx rsa; struct hpre_dh_ctx dh; + struct hpre_ecdh_ctx ecdh; + struct hpre_curve25519_ctx curve25519; }; + /* for ecc algorithms */ + unsigned int curve_id; }; struct hpre_asym_request { @@ -91,6 +126,8 @@ struct hpre_asym_request { union { struct akcipher_request *rsa; struct kpp_request *dh; + struct kpp_request *ecdh; + struct kpp_request *curve25519; } areq; int err; int req_id; @@ -152,12 +189,12 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req) } } -static struct hisi_qp *hpre_get_qp_and_start(void) +static struct hisi_qp *hpre_get_qp_and_start(u8 type) { struct hisi_qp *qp; int ret; - qp = hpre_create_qp(); 
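
[editor's note] The hpre_ecdh_set_secret() and hpre_ecdh_fill_curve() hunks below add two sanity checks on the ECDH private key: an all-zero key is rejected outright, and a full-length key must compare below the curve order n. A standalone sketch over big-endian buffers of equal length; the name is hypothetical:

```c
#include <linux/string.h>
#include <linux/types.h>

static bool demo_ecdh_key_ok(const u8 *key, const u8 *n, unsigned int len)
{
	unsigned int i;
	u8 acc = 0;

	for (i = 0; i < len; i++)
		acc |= key[i];
	if (!acc)
		return false;		/* all-zero key is invalid */

	/* Big-endian compare: the scalar must be below the curve order. */
	return memcmp(key, n, len) < 0;
}
```
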
+ qp = hpre_create_qp(type); if (!qp) { pr_err("Can not create hpre qp!\n"); return ERR_PTR(-ENODEV); @@ -261,8 +298,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, dma_addr_t tmp; tmp = le64_to_cpu(sqe->in); - if (unlikely(!tmp)) - return; if (src) { if (req->src) @@ -272,8 +307,6 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, } tmp = le64_to_cpu(sqe->out); - if (unlikely(!tmp)) - return; if (req->dst) { if (dst) @@ -288,13 +321,16 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, void **kreq) { + struct device *dev = HPRE_DEV(ctx); struct hpre_asym_request *req; - int err, id, done; + unsigned int err, done, alg; + int id; #define HPRE_NO_HW_ERR 0 #define HPRE_HW_TASK_DONE 3 #define HREE_HW_ERR_MASK 0x7ff #define HREE_SQE_DONE_MASK 0x3 +#define HREE_ALG_TYPE_MASK 0x1f id = (int)le16_to_cpu(sqe->tag); req = ctx->req_list[id]; hpre_rm_req_from_ctx(req); @@ -307,7 +343,11 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, HREE_SQE_DONE_MASK; if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)) - return 0; + return 0; + + alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK; + dev_err_ratelimited(dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n", + alg, done, err); return -EINVAL; } @@ -413,7 +453,6 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp) struct hpre_sqe *sqe = resp; struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)]; - if (unlikely(!req)) { atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value); return; @@ -422,18 +461,29 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp) req->cb(ctx, resp); } -static int hpre_ctx_init(struct hpre_ctx *ctx) +static void hpre_stop_qp_and_put(struct hisi_qp *qp) +{ + hisi_qm_stop_qp(qp); + hisi_qm_free_qps(&qp, 1); +} + +static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) { struct hisi_qp *qp; + int ret; - qp = hpre_get_qp_and_start(); + qp = hpre_get_qp_and_start(type); if (IS_ERR(qp)) return PTR_ERR(qp); qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; - return hpre_ctx_set(ctx, qp, QM_Q_DEPTH); + ret = hpre_ctx_set(ctx, qp, QM_Q_DEPTH); + if (ret) + hpre_stop_qp_and_put(qp); + + return ret; } static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) @@ -510,7 +560,6 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) return ret; } -#ifdef CONFIG_CRYPTO_DH static int hpre_dh_compute_value(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); @@ -674,7 +723,7 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - return hpre_ctx_init(ctx); + return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); } static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) @@ -683,7 +732,6 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) hpre_dh_clear_ctx(ctx, true); } -#endif static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) { @@ -1100,7 +1148,7 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) return PTR_ERR(ctx->rsa.soft_tfm); } - ret = hpre_ctx_init(ctx); + ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); if (ret) crypto_free_akcipher(ctx->rsa.soft_tfm); @@ -1115,6 +1163,734 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) crypto_free_akcipher(ctx->rsa.soft_tfm); } +static void hpre_key_to_big_end(u8 *data, int len) +{ + int i, j; + u8 tmp; + + for (i = 0; i < len / 2; i++) { + j = len - i - 1; + tmp = data[j]; + data[j] = data[i]; + data[i] = tmp; + } +} + +static 
void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all, + bool is_ecdh) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz = ctx->key_sz; + unsigned int shift = sz << 1; + + if (is_clear_all) + hisi_qm_stop_qp(ctx->qp); + + if (is_ecdh && ctx->ecdh.p) { + /* ecdh: p->a->k->b */ + memzero_explicit(ctx->ecdh.p + shift, sz); + dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p); + ctx->ecdh.p = NULL; + } else if (!is_ecdh && ctx->curve25519.p) { + /* curve25519: p->a->k */ + memzero_explicit(ctx->curve25519.p + shift, sz); + dma_free_coherent(dev, sz << 2, ctx->curve25519.p, + ctx->curve25519.dma_p); + ctx->curve25519.p = NULL; + } + + hpre_ctx_clear(ctx, is_clear_all); +} + +static unsigned int hpre_ecdh_supported_curve(unsigned short id) +{ + switch (id) { + case ECC_CURVE_NIST_P192: + case ECC_CURVE_NIST_P256: + return HPRE_ECC_HW256_KSZ_B; + default: + break; + } + + return 0; +} + +static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits) +{ + unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64); + u8 i = 0; + + while (i < ndigits - 1) { + memcpy(addr + sizeof(u64) * i, ¶m[i], sizeof(u64)); + i++; + } + + memcpy(addr + sizeof(u64) * i, ¶m[ndigits - 1], sz); + hpre_key_to_big_end((u8 *)addr, cur_sz); +} + +static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params, + unsigned int cur_sz) +{ + unsigned int shifta = ctx->key_sz << 1; + unsigned int shiftb = ctx->key_sz << 2; + void *p = ctx->ecdh.p + ctx->key_sz - cur_sz; + void *a = ctx->ecdh.p + shifta - cur_sz; + void *b = ctx->ecdh.p + shiftb - cur_sz; + void *x = ctx->ecdh.g + ctx->key_sz - cur_sz; + void *y = ctx->ecdh.g + shifta - cur_sz; + const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id); + char *n; + + if (unlikely(!curve)) + return -EINVAL; + + n = kzalloc(ctx->key_sz, GFP_KERNEL); + if (!n) + return -ENOMEM; + + fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits); + fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits); + fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits); + fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits); + fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits); + fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits); + + if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) { + kfree(n); + return -EINVAL; + } + + kfree(n); + return 0; +} + +static unsigned int hpre_ecdh_get_curvesz(unsigned short id) +{ + switch (id) { + case ECC_CURVE_NIST_P192: + return HPRE_ECC_NIST_P192_N_SIZE; + case ECC_CURVE_NIST_P256: + return HPRE_ECC_NIST_P256_N_SIZE; + default: + break; + } + + return 0; +} + +static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz, shift, curve_sz; + int ret; + + ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id); + if (!ctx->key_sz) + return -EINVAL; + + curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); + if (!curve_sz || params->key_size > curve_sz) + return -EINVAL; + + sz = ctx->key_sz; + + if (!ctx->ecdh.p) { + ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p, + GFP_KERNEL); + if (!ctx->ecdh.p) + return -ENOMEM; + } + + shift = sz << 2; + ctx->ecdh.g = ctx->ecdh.p + shift; + ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift; + + ret = hpre_ecdh_fill_curve(ctx, params, curve_sz); + if (ret) { + dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret); + dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p); + ctx->ecdh.p = NULL; + return ret; + } + + return 0; +} + +static 
bool hpre_key_is_zero(char *key, unsigned short key_sz)
+{
+ int i;
+
+ for (i = 0; i < key_sz; i++)
+ if (key[i])
+ return false;
+
+ return true;
+}
+
+static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz, sz_shift;
+ struct ecdh params;
+ int ret;
+
+ if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
+ dev_err(dev, "failed to decode ecdh key!\n");
+ return -EINVAL;
+ }
+
+ if (hpre_key_is_zero(params.key, params.key_size)) {
+ dev_err(dev, "Invalid hpre key!\n");
+ return -EINVAL;
+ }
+
+ hpre_ecc_clear_ctx(ctx, false, true);
+
+ ret = hpre_ecdh_set_param(ctx, &params);
+ if (ret < 0) {
+ dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
+ return ret;
+ }
+
+ sz = ctx->key_sz;
+ sz_shift = (sz << 1) + sz - params.key_size;
+ memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
+
+ return 0;
+}
+
+static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
+}
+
+static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
+{
+ unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ char *p;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.ecdh;
+ areq->dst_len = ctx->key_sz << 1;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ p = sg_virt(areq->dst);
+ memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
+ memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
+
+ hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (req->dst_len < ctx->key_sz << 1) {
+ req->dst_len = ctx->key_sz << 1;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_ecdh_cb;
+ h_req->areq.ecdh = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->ecdh.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int tmpshift;
+ dma_addr_t dma = 0;
+ void *ptr;
+ int shift;
+
+ /* Src_data includes gx and gy.
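+ * Each coordinate is len/2 bytes; the copies below right-align gx and
+ * gy in one key_sz-sized slot each, so both end up zero-padded at the
+ * front (dma_alloc_coherent() memory starts out zeroed).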
*/
+ shift = ctx->key_sz - (len >> 1);
+ if (unlikely(shift < 0))
+ return -EINVAL;
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ tmpshift = ctx->key_sz << 1;
+ scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
+ memcpy(ptr + shift, ptr + tmpshift, len >> 1);
+ memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
+
+ hpre_req->src = ptr;
+ msg->in = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ dma_addr_t dma = 0;
+
+ if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
+ dev_err(dev, "data or data length is illegal!\n");
+ return -EINVAL;
+ }
+
+ hpre_req->dst = NULL;
+ dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ dev_err(dev, "dma map data err!\n");
+ return -ENOMEM;
+ }
+
+ msg->out = cpu_to_le64(dma);
+ return 0;
+}
+
+static int hpre_ecdh_compute_value(struct kpp_request *req)
+{
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ void *tmp = kpp_request_ctx(req);
+ struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ struct hpre_sqe *msg = &hpre_req->req;
+ int ret;
+
+ ret = hpre_ecdh_msg_request_set(ctx, req);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
+ return ret;
+ }
+
+ if (req->src) {
+ ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init src data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+ } else {
+ msg->in = cpu_to_le64(ctx->ecdh.dma_g);
+ }
+
+ ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
+ if (unlikely(ret)) {
+ dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
+ goto clear_all;
+ }
+
+ msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
+ ret = hpre_send(ctx, msg);
+ if (likely(!ret))
+ return -EINPROGRESS;
+
+clear_all:
+ hpre_rm_req_from_ctx(hpre_req);
+ hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
+ return ret;
+}
+
+static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ /* max size is the pub_key_size, including x and y */
+ return ctx->key_sz << 1;
+}
+
+static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P192;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ ctx->curve_id = ECC_CURVE_NIST_P256;
+
+ return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+}
+
+static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+ hpre_ecc_clear_ctx(ctx, true, true);
+}
+
+static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ u8 secret[CURVE25519_KEY_SIZE] = { 0 };
+ unsigned int sz = ctx->key_sz;
+ const struct ecc_curve *curve;
+ unsigned int shift = sz << 1;
+ void *p;
+
+ /*
+ * The key from 'buf' is in little-endian; preprocess it as described
+ * in RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"), then convert
+ * it to big endian. Only then does the result match the software
+ * curve25519 implementation in crypto.
+ */
+ memcpy(secret, buf, len);
+ curve25519_clamp_secret(secret);
+ hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
+
+ p = ctx->curve25519.p + sz - len;
+
+ curve = ecc_get_curve25519();
+
+ /* fill curve parameters */
+ fill_curve_param(p, curve->p, len, curve->g.ndigits);
+ fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
+ memcpy(p + shift, secret, len);
+ fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
+ memzero_explicit(secret, CURVE25519_KEY_SIZE);
+}
+
+static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
+ unsigned int len)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ unsigned int sz = ctx->key_sz;
+ unsigned int shift = sz << 1;
+
+ /* p->a->k->gx */
+ if (!ctx->curve25519.p) {
+ ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
+ &ctx->curve25519.dma_p,
+ GFP_KERNEL);
+ if (!ctx->curve25519.p)
+ return -ENOMEM;
+ }
+
+ ctx->curve25519.g = ctx->curve25519.p + shift + sz;
+ ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
+
+ hpre_curve25519_fill_curve(ctx, buf, len);
+
+ return 0;
+}
+
+static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+ struct device *dev = HPRE_DEV(ctx);
+ int ret = -EINVAL;
+
+ if (len != CURVE25519_KEY_SIZE ||
+ !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "key is null or key len is not 32 bytes!\n");
+ return ret;
+ }
+
+ /* Free old secret if any */
+ hpre_ecc_clear_ctx(ctx, false, false);
+
+ ctx->key_sz = CURVE25519_KEY_SIZE;
+ ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
+ if (ret) {
+ dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
+ hpre_ecc_clear_ctx(ctx, false, false);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
+ struct hpre_asym_request *req,
+ struct scatterlist *dst,
+ struct scatterlist *src)
+{
+ struct device *dev = HPRE_DEV(ctx);
+ struct hpre_sqe *sqe = &req->req;
+ dma_addr_t dma;
+
+ dma = le64_to_cpu(sqe->in);
+
+ if (src && req->src)
+ dma_free_coherent(dev, ctx->key_sz, req->src, dma);
+
+ dma = le64_to_cpu(sqe->out);
+
+ if (req->dst)
+ dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
+ if (dst)
+ dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
+}
+
+static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ struct hpre_asym_request *req = NULL;
+ struct kpp_request *areq;
+ u64 overtime_thrhld;
+ int ret;
+
+ ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+ areq = req->areq.curve25519;
+ areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
+ hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
+
+ hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
+ kpp_request_complete(areq, ret);
+
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
+}
+
+static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
+ struct kpp_request *req)
+{
+ struct hpre_asym_request *h_req;
+ struct hpre_sqe *msg;
+ int req_id;
+ void *tmp;
+
+ if (unlikely(req->dst_len < ctx->key_sz)) {
+ req->dst_len = ctx->key_sz;
+ return -EINVAL;
+ }
+
+ tmp = kpp_request_ctx(req);
+ h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
+ h_req->cb = hpre_curve25519_cb;
+ h_req->areq.curve25519 = req;
+ msg = &h_req->req;
+ memset(msg, 0, sizeof(*msg));
+ msg->key = cpu_to_le64(ctx->curve25519.dma_p);
+
+ msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
+ msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
+ h_req->ctx = ctx;
+
+ req_id = hpre_add_req_to_ctx(h_req);
+ if (req_id < 0)
+ return -EBUSY;
+
+ msg->tag = cpu_to_le16((u16)req_id);
+ return 0;
+}
+
+static void hpre_curve25519_src_modulo_p(u8 *ptr)
+{
+ int i;
+
+ for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
+ ptr[i] = 0;
+
+ /* The modulus is ptr's last byte minus '0xed' (last byte of p) */
+ ptr[i] -= 0xed;
+}
+
+static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
+ struct scatterlist *data, unsigned int len)
+{
+ struct hpre_sqe *msg = &hpre_req->req;
+ struct hpre_ctx *ctx = hpre_req->ctx;
+ struct device *dev = HPRE_DEV(ctx);
+ u8 p[CURVE25519_KEY_SIZE] = { 0 };
+ const struct ecc_curve *curve;
+ dma_addr_t dma = 0;
+ u8 *ptr;
+
+ if (len != CURVE25519_KEY_SIZE) {
+ dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len);
+ return -EINVAL;
+ }
+
+ ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
+ if (unlikely(!ptr))
+ return -ENOMEM;
+
+ scatterwalk_map_and_copy(ptr, data, 0, len, 0);
+
+ if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
+ dev_err(dev, "gx is null!\n");
+ goto err;
+ }
+
+ /*
+ * Src_data (gx) is in little-endian order; the MSB in the final byte
+ * must be masked as described in RFC 7748, and the data converted to
+ * big-endian form before hisi_hpre can use it.
+ */
+ ptr[31] &= 0x7f;
+ hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
+
+ curve = ecc_get_curve25519();
+
+ fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
+
+ /*
+ * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of range
+ * of p, so we reduce it modulo p and then use it.
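+ * Since the masked value is below 2^255 = p + 19, a single subtraction
+ * of p suffices: 2^255 - 19 maps to 0 and 2^255 - 1 maps to 18, which
+ * is what hpre_curve25519_src_modulo_p() implements by clearing all
+ * bytes except the last and subtracting 0xed from it.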
+ */ + if (memcmp(ptr, p, ctx->key_sz) >= 0) + hpre_curve25519_src_modulo_p(ptr); + + hpre_req->src = ptr; + msg->in = cpu_to_le64(dma); + return 0; + +err: + dma_free_coherent(dev, ctx->key_sz, ptr, dma); + return -EINVAL; +} + +static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + dma_addr_t dma = 0; + + if (!data || !sg_is_last(data) || len != ctx->key_sz) { + dev_err(dev, "data or data length is illegal!\n"); + return -EINVAL; + } + + hpre_req->dst = NULL; + dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, dma))) { + dev_err(dev, "dma map data err!\n"); + return -ENOMEM; + } + + msg->out = cpu_to_le64(dma); + return 0; +} + +static int hpre_curve25519_compute_value(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + void *tmp = kpp_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ret; + + ret = hpre_curve25519_msg_request_set(ctx, req); + if (unlikely(ret)) { + dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret); + return ret; + } + + if (req->src) { + ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init src data, ret = %d!\n", + ret); + goto clear_all; + } + } else { + msg->in = cpu_to_le64(ctx->curve25519.dma_g); + } + + ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init dst data, ret = %d!\n", ret); + goto clear_all; + } + + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL); + ret = hpre_send(ctx, msg); + if (likely(!ret)) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + return ret; +} + +static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return ctx->key_sz; +} + +static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); +} + +static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + hpre_ecc_clear_ctx(ctx, true, false); +} + static struct akcipher_alg rsa = { .sign = hpre_rsa_dec, .verify = hpre_rsa_enc, @@ -1135,7 +1911,6 @@ static struct akcipher_alg rsa = { }, }; -#ifdef CONFIG_CRYPTO_DH static struct kpp_alg dh = { .set_secret = hpre_dh_set_secret, .generate_public_key = hpre_dh_compute_value, @@ -1152,9 +1927,83 @@ static struct kpp_alg dh = { .cra_module = THIS_MODULE, }, }; -#endif -int hpre_algs_register(void) +static struct kpp_alg ecdh_nist_p192 = { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p192_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p192", + .cra_driver_name = "hpre-ecdh", + .cra_module = THIS_MODULE, + }, +}; + +static struct 
kpp_alg ecdh_nist_p256 = { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p256_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p256", + .cra_driver_name = "hpre-ecdh", + .cra_module = THIS_MODULE, + }, +}; + +static struct kpp_alg curve25519_alg = { + .set_secret = hpre_curve25519_set_secret, + .generate_public_key = hpre_curve25519_compute_value, + .compute_shared_secret = hpre_curve25519_compute_value, + .max_size = hpre_curve25519_max_size, + .init = hpre_curve25519_init_tfm, + .exit = hpre_curve25519_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "curve25519", + .cra_driver_name = "hpre-curve25519", + .cra_module = THIS_MODULE, + }, +}; + + +static int hpre_register_ecdh(void) +{ + int ret; + + ret = crypto_register_kpp(&ecdh_nist_p192); + if (ret) + return ret; + + ret = crypto_register_kpp(&ecdh_nist_p256); + if (ret) { + crypto_unregister_kpp(&ecdh_nist_p192); + return ret; + } + + return 0; +} + +static void hpre_unregister_ecdh(void) +{ + crypto_unregister_kpp(&ecdh_nist_p256); + crypto_unregister_kpp(&ecdh_nist_p192); +} + +int hpre_algs_register(struct hisi_qm *qm) { int ret; @@ -1162,19 +2011,37 @@ int hpre_algs_register(void) ret = crypto_register_akcipher(&rsa); if (ret) return ret; -#ifdef CONFIG_CRYPTO_DH + ret = crypto_register_kpp(&dh); if (ret) - crypto_unregister_akcipher(&rsa); -#endif + goto unreg_rsa; + if (qm->ver >= QM_HW_V3) { + ret = hpre_register_ecdh(); + if (ret) + goto unreg_dh; + ret = crypto_register_kpp(&curve25519_alg); + if (ret) + goto unreg_ecdh; + } + return 0; + +unreg_ecdh: + hpre_unregister_ecdh(); +unreg_dh: + crypto_unregister_kpp(&dh); +unreg_rsa: + crypto_unregister_akcipher(&rsa); return ret; } -void hpre_algs_unregister(void) +void hpre_algs_unregister(struct hisi_qm *qm) { - crypto_unregister_akcipher(&rsa); -#ifdef CONFIG_CRYPTO_DH + if (qm->ver >= QM_HW_V3) { + crypto_unregister_kpp(&curve25519_alg); + hpre_unregister_ecdh(); + } + crypto_unregister_kpp(&dh); -#endif + crypto_unregister_akcipher(&rsa); } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index e7a2c70eb9cf..046bc962c8b2 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -13,7 +13,6 @@ #include #include "hpre.h" -#define HPRE_QUEUE_NUM_V2 1024 #define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) #define HPRE_COMM_CNT_CLR_CE 0x0 @@ -119,7 +118,6 @@ static struct hisi_qm_list hpre_devices = { }; static const char * const hpre_debug_file_name[] = { - [HPRE_CURRENT_QM] = "current_qm", [HPRE_CLEAR_ENABLE] = "rdclr_en", [HPRE_CLUSTER_CTRL] = "cluster_ctrl", }; @@ -226,41 +224,44 @@ static u32 vfs_num; module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); -struct hisi_qp *hpre_create_qp(void) +struct hisi_qp *hpre_create_qp(u8 type) { int node = cpu_to_node(smp_processor_id()); struct hisi_qp *qp = NULL; int ret; - ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp); + if (type != HPRE_V2_ALG_TYPE && type != 
HPRE_V3_ECC_ALG_TYPE) + return NULL; + + /* + * type: 0 - RSA/DH. algorithm supported in V2, + * 1 - ECC algorithm in V3. + */ + ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp); if (!ret) return qp; return NULL; } -static void hpre_pasid_enable(struct hisi_qm *qm) +static void hpre_config_pasid(struct hisi_qm *qm) { - u32 val; + u32 val1, val2; - val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); - val |= BIT(HPRE_PASID_EN_BIT); - writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG); - val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); - val |= BIT(HPRE_PASID_EN_BIT); - writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG); -} + if (qm->ver >= QM_HW_V3) + return; -static void hpre_pasid_disable(struct hisi_qm *qm) -{ - u32 val; - - val = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); - val &= ~BIT(HPRE_PASID_EN_BIT); - writel_relaxed(val, qm->io_base + HPRE_DATA_RUSER_CFG); - val = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); - val &= ~BIT(HPRE_PASID_EN_BIT); - writel_relaxed(val, qm->io_base + HPRE_DATA_WUSER_CFG); + val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); + val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); + if (qm->use_sva) { + val1 |= BIT(HPRE_PASID_EN_BIT); + val2 |= BIT(HPRE_PASID_EN_BIT); + } else { + val1 &= ~BIT(HPRE_PASID_EN_BIT); + val2 &= ~BIT(HPRE_PASID_EN_BIT); + } + writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG); + writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG); } static int hpre_cfg_by_dsm(struct hisi_qm *qm) @@ -320,7 +321,7 @@ static int hpre_set_cluster(struct hisi_qm *qm) } /* - * For Kunpeng 920, we shoul disable FLR triggered by hardware (BME/PM/SRIOV). + * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV). * Or it may stay in D3 state when we bind and unbind hpre quickly, * as it does FLR triggered by hardware. 
*/ @@ -383,15 +384,14 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) if (qm->ver == QM_HW_V2) { ret = hpre_cfg_by_dsm(qm); if (ret) - dev_err(dev, "acpi_evaluate_dsm err.\n"); + return ret; disable_flr_of_bme(qm); - - /* Enable data buffer pasid */ - if (qm->use_sva) - hpre_pasid_enable(qm); } + /* Config data buffer pasid needed by Kunpeng 920 */ + hpre_config_pasid(qm); + return ret; } @@ -401,10 +401,6 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm) unsigned long offset; int i; - /* clear current_qm */ - writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); - writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); - /* clear clusterX/cluster_ctrl */ for (i = 0; i < clusters_num; i++) { offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL; @@ -456,49 +452,6 @@ static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) return &hpre->qm; } -static u32 hpre_current_qm_read(struct hpre_debugfs_file *file) -{ - struct hisi_qm *qm = hpre_file_to_qm(file); - - return readl(qm->io_base + QM_DFX_MB_CNT_VF); -} - -static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val) -{ - struct hisi_qm *qm = hpre_file_to_qm(file); - u32 num_vfs = qm->vfs_num; - u32 vfq_num, tmp; - - if (val > num_vfs) - return -EINVAL; - - /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ - if (val == 0) { - qm->debug.curr_qm_qp_num = qm->qp_num; - } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; - if (val == num_vfs) { - qm->debug.curr_qm_qp_num = - qm->ctrl_qp_num - qm->qp_num - (num_vfs - 1) * vfq_num; - } else { - qm->debug.curr_qm_qp_num = vfq_num; - } - } - - writel(val, qm->io_base + QM_DFX_MB_CNT_VF); - writel(val, qm->io_base + QM_DFX_DB_CNT_VF); - - tmp = val | - (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - - tmp = val | - (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - return 0; -} - static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file) { struct hisi_qm *qm = hpre_file_to_qm(file); @@ -519,7 +472,7 @@ static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val) ~HPRE_CTRL_CNT_CLR_CE_BIT) | val; writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE); - return 0; + return 0; } static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file) @@ -541,7 +494,7 @@ static int hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); - return 0; + return 0; } static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, @@ -554,9 +507,6 @@ static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, spin_lock_irq(&file->lock); switch (file->type) { - case HPRE_CURRENT_QM: - val = hpre_current_qm_read(file); - break; case HPRE_CLEAR_ENABLE: val = hpre_clear_enable_read(file); break; @@ -597,11 +547,6 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, spin_lock_irq(&file->lock); switch (file->type) { - case HPRE_CURRENT_QM: - ret = hpre_current_qm_write(file, val); - if (ret) - goto err_input; - break; case HPRE_CLEAR_ENABLE: ret = hpre_clear_enable_write(file, val); if (ret) @@ -740,11 +685,6 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm) { int ret; - ret = hpre_create_debugfs_file(qm, NULL, HPRE_CURRENT_QM, - HPRE_CURRENT_QM); - if (ret) - return ret; - ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, HPRE_CLEAR_ENABLE); if (ret) @@ -812,9 +752,9 @@ 
static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) } if (pdev->revision >= QM_HW_V3) - qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2\n"; + qm->algs = "rsa\ndh\necdh\nx25519\nx448\necdsa\nsm2"; else - qm->algs = "rsa\ndh\n"; + qm->algs = "rsa\ndh"; qm->mode = uacce_mode; qm->pdev = pdev; qm->ver = pdev->revision; @@ -867,6 +807,20 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm) HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB)); } +static void hpre_err_info_init(struct hisi_qm *qm) +{ + struct hisi_qm_err_info *err_info = &qm->err_info; + + err_info->ce = QM_BASE_CE; + err_info->fe = 0; + err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | + HPRE_OOO_ECC_2BIT_ERR; + err_info->dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE; + err_info->msi_wr_port = HPRE_WR_MSI_PORT; + err_info->acpi_rst = "HRST"; + err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT; +} + static const struct hisi_qm_err_ini hpre_err_ini = { .hw_init = hpre_set_user_domain_and_cache, .hw_err_enable = hpre_hw_error_enable, @@ -875,16 +829,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .clear_dev_hw_err_status = hpre_clear_hw_err_status, .log_dev_hw_err = hpre_log_hw_error, .open_axi_master_ooo = hpre_open_axi_master_ooo, - .err_info = { - .ce = QM_BASE_CE, - .nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT, - .fe = 0, - .ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | - HPRE_OOO_ECC_2BIT_ERR, - .dev_ce_mask = HPRE_HAC_RAS_CE_ENABLE, - .msi_wr_port = HPRE_WR_MSI_PORT, - .acpi_rst = "HRST", - } + .err_info_init = hpre_err_info_init, }; static int hpre_pf_probe_init(struct hpre *hpre) @@ -892,13 +837,12 @@ static int hpre_pf_probe_init(struct hpre *hpre) struct hisi_qm *qm = &hpre->qm; int ret; - qm->ctrl_qp_num = HPRE_QUEUE_NUM_V2; - ret = hpre_set_user_domain_and_cache(qm); if (ret) return ret; qm->err_ini = &hpre_err_ini; + qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); return 0; @@ -1006,8 +950,6 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_stop(qm, QM_NORMAL); if (qm->fun_type == QM_HW_PF) { - if (qm->use_sva && qm->ver == QM_HW_V2) - hpre_pasid_disable(qm); hpre_cnt_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; hisi_qm_dev_err_uninit(qm); @@ -1016,7 +958,6 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_uninit(qm); } - static const struct pci_error_handlers hpre_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, @@ -1075,4 +1016,5 @@ module_exit(hpre_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zaibo Xu "); +MODULE_AUTHOR("Meng Yu "); MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator"); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 13cb4216561a..ce439a0c66c9 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -38,6 +38,7 @@ #define QM_MB_CMD_SQC_BT 0x4 #define QM_MB_CMD_CQC_BT 0x5 #define QM_MB_CMD_SQC_VFT_V2 0x6 +#define QM_MB_CMD_STOP_QP 0x8 #define QM_MB_CMD_SEND_BASE 0x300 #define QM_MB_EVENT_SHIFT 8 @@ -93,6 +94,12 @@ #define QM_DB_PRIORITY_SHIFT_V1 48 #define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 #define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 +#define QM_QUE_ISO_CFG_V 0x0030 +#define QM_QUE_ISO_EN 0x100154 +#define QM_CAPBILITY 0x100158 +#define QM_QP_NUN_MASK GENMASK(10, 0) +#define QM_QP_DB_INTERVAL 0x10000 +#define QM_QP_MAX_NUM_SHIFT 11 #define QM_DB_CMD_SHIFT_V2 12 #define QM_DB_RAND_SHIFT_V2 16 #define QM_DB_INDEX_SHIFT_V2 32 @@ -129,9 +136,9 @@ #define QM_DFX_CNT_CLR_CE 0x100118 #define QM_ABNORMAL_INT_SOURCE 0x100000 -#define QM_ABNORMAL_INT_SOURCE_CLR 
GENMASK(12, 0) +#define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0) #define QM_ABNORMAL_INT_MASK 0x100004 -#define QM_ABNORMAL_INT_MASK_VALUE 0x1fff +#define QM_ABNORMAL_INT_MASK_VALUE 0x7fff #define QM_ABNORMAL_INT_STATUS 0x100008 #define QM_ABNORMAL_INT_SET 0x10000c #define QM_ABNORMAL_INF00 0x100010 @@ -164,6 +171,14 @@ #define ACC_AM_ROB_ECC_INT_STS 0x300104 #define ACC_ROB_ECC_ERR_MULTPL BIT(1) +#define QM_DFX_MB_CNT_VF 0x104010 +#define QM_DFX_DB_CNT_VF 0x104020 +#define QM_DFX_SQE_CNT_VF_SQN 0x104030 +#define QM_DFX_CQE_CNT_VF_CQN 0x104040 +#define QM_DFX_QN_SHIFT 16 +#define CURRENT_FUN_MASK GENMASK(5, 0) +#define CURRENT_Q_MASK GENMASK(31, 16) + #define POLL_PERIOD 10 #define POLL_TIMEOUT 1000 #define WAIT_PERIOD_US_MAX 200 @@ -173,6 +188,7 @@ #define QM_CACHE_WB_DONE 0x208 #define PCI_BAR_2 2 +#define PCI_BAR_4 4 #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0) #define QMC_ALIGN(sz) ALIGN(sz, 32) @@ -334,6 +350,7 @@ struct hisi_qm_hw_ops { void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe); void (*hw_error_uninit)(struct hisi_qm *qm); enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); + int (*stop_qp)(struct hisi_qp *qp); }; struct qm_dfx_item { @@ -350,6 +367,7 @@ static struct qm_dfx_item qm_dfx_files[] = { }; static const char * const qm_debug_file_name[] = { + [CURRENT_QM] = "current_qm", [CURRENT_Q] = "current_q", [CLEAR_ENABLE] = "clear_enable", }; @@ -373,6 +391,8 @@ static const struct hisi_qm_hw_error qm_hw_error[] = { { .int_msk = BIT(10), .msg = "qm_db_timeout" }, { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, + { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" }, + { .int_msk = BIT(14), .msg = "qm_flr_timeout" }, { /* sentinel */ } }; @@ -557,21 +577,22 @@ static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { - u64 doorbell; - u64 dbase; + void __iomem *io_base = qm->io_base; u16 randata = 0; + u64 doorbell; if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) - dbase = QM_DOORBELL_SQ_CQ_BASE_V2; + io_base = qm->db_io_base + (u64)qn * qm->db_interval + + QM_DOORBELL_SQ_CQ_BASE_V2; else - dbase = QM_DOORBELL_EQ_AEQ_BASE_V2; + io_base += QM_DOORBELL_EQ_AEQ_BASE_V2; doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | ((u64)randata << QM_DB_RAND_SHIFT_V2) | ((u64)index << QM_DB_INDEX_SHIFT_V2) | ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); - writeq(doorbell, qm->io_base + dbase); + writeq(doorbell, io_base); } static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) @@ -865,6 +886,26 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) return 0; } +static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num) +{ + u32 remain_q_num, vfq_num; + u32 num_vfs = qm->vfs_num; + + vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; + if (vfq_num >= qm->max_qp_num) + return qm->max_qp_num; + + remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs; + if (vfq_num + remain_q_num <= qm->max_qp_num) + return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num; + + /* + * if vfq_num + remain_q_num > max_qp_num, the last VFs, + * each with one more queue. + */ + return fun_num + remain_q_num > num_vfs ? 
vfq_num + 1 : vfq_num; +} + static struct hisi_qm *file_to_qm(struct debugfs_file *file) { struct qm_debug *debug = file->debug; @@ -918,6 +959,41 @@ static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl) return 0; } +static u32 current_qm_read(struct debugfs_file *file) +{ + struct hisi_qm *qm = file_to_qm(file); + + return readl(qm->io_base + QM_DFX_MB_CNT_VF); +} + +static int current_qm_write(struct debugfs_file *file, u32 val) +{ + struct hisi_qm *qm = file_to_qm(file); + u32 tmp; + + if (val > qm->vfs_num) + return -EINVAL; + + /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ + if (!val) + qm->debug.curr_qm_qp_num = qm->qp_num; + else + qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val); + + writel(val, qm->io_base + QM_DFX_MB_CNT_VF); + writel(val, qm->io_base + QM_DFX_DB_CNT_VF); + + tmp = val | + (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); + + tmp = val | + (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); + writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); + + return 0; +} + static ssize_t qm_debug_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { @@ -929,6 +1005,9 @@ static ssize_t qm_debug_read(struct file *filp, char __user *buf, mutex_lock(&file->lock); switch (index) { + case CURRENT_QM: + val = current_qm_read(file); + break; case CURRENT_Q: val = current_q_read(file); break; @@ -971,27 +1050,24 @@ static ssize_t qm_debug_write(struct file *filp, const char __user *buf, mutex_lock(&file->lock); switch (index) { + case CURRENT_QM: + ret = current_qm_write(file, val); + break; case CURRENT_Q: ret = current_q_write(file, val); - if (ret) - goto err_input; break; case CLEAR_ENABLE: ret = clear_enable_write(file, val); - if (ret) - goto err_input; break; default: ret = -EINVAL; - goto err_input; } mutex_unlock(&file->lock); - return count; + if (ret) + return ret; -err_input: - mutex_unlock(&file->lock); - return ret; + return count; } static const struct file_operations qm_debug_fops = { @@ -1529,12 +1605,12 @@ static const struct file_operations qm_cmd_fops = { .write = qm_cmd_write, }; -static void qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index) +static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, + enum qm_debug_file index) { - struct dentry *qm_d = qm->debug.qm_d; struct debugfs_file *file = qm->debug.files + index; - debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file, + debugfs_create_file(qm_debug_file_name[index], 0600, dir, file, &qm_debug_fops); file->index = index; @@ -1628,7 +1704,7 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) { writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); - writel(qm->err_ini->err_info.nfe, + writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); return ACC_ERR_RECOVERED; } @@ -1639,6 +1715,11 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) return ACC_ERR_RECOVERED; } +static int qm_stop_qp(struct hisi_qp *qp) +{ + return qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); +} + static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { .qm_db = qm_db_v1, .get_irq_num = qm_get_irq_num_v1, @@ -1654,6 +1735,16 @@ static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { .hw_error_handle = qm_hw_error_handle_v2, }; +static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { + .get_vft = qm_get_vft_v2, + .qm_db = qm_db_v2, + .get_irq_num = 
qm_get_irq_num_v2, + .hw_error_init = qm_hw_error_init_v2, + .hw_error_uninit = qm_hw_error_uninit_v2, + .hw_error_handle = qm_hw_error_handle_v2, + .stop_qp = qm_stop_qp, +}; + static void *qm_get_avail_sqe(struct hisi_qp *qp) { struct hisi_qp_status *qp_status = &qp->qp_status; @@ -1933,6 +2024,14 @@ static int qm_drain_qp(struct hisi_qp *qp) if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit) return 0; + /* Kunpeng930 supports drain qp by device */ + if (qm->ops->stop_qp) { + ret = qm->ops->stop_qp(qp); + if (ret) + dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); + return ret; + } + addr = qm_ctx_alloc(qm, size, &dma_addr); if (IS_ERR(addr)) { dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); @@ -2132,6 +2231,8 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q, { struct hisi_qp *qp = q->priv; struct hisi_qm *qm = qp->qm; + resource_size_t phys_base = qm->db_phys_base + + qp->qp_id * qm->db_interval; size_t sz = vma->vm_end - vma->vm_start; struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; @@ -2143,16 +2244,19 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q, if (qm->ver == QM_HW_V1) { if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) return -EINVAL; - } else { + } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) { if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) return -EINVAL; + } else { + if (sz > qm->db_interval) + return -EINVAL; } vma->vm_flags |= VM_IO; return remap_pfn_range(vma, vma->vm_start, - qm->phys_base >> PAGE_SHIFT, + phys_base >> PAGE_SHIFT, sz, pgprot_noncached(vma->vm_page_prot)); case UACCE_QFRT_DUS: if (sz != qp->qdma.size) @@ -2267,14 +2371,20 @@ static int qm_alloc_uacce(struct hisi_qm *qm) uacce->priv = qm; uacce->algs = qm->algs; - if (qm->ver == QM_HW_V1) { - mmio_page_nr = QM_DOORBELL_PAGE_NR; + if (qm->ver == QM_HW_V1) uacce->api_ver = HISI_QM_API_VER_BASE; - } else { + else if (qm->ver == QM_HW_V2) + uacce->api_ver = HISI_QM_API_VER2_BASE; + else + uacce->api_ver = HISI_QM_API_VER3_BASE; + + if (qm->ver == QM_HW_V1) + mmio_page_nr = QM_DOORBELL_PAGE_NR; + else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) mmio_page_nr = QM_DOORBELL_PAGE_NR + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; - uacce->api_ver = HISI_QM_API_VER2_BASE; - } + else + mmio_page_nr = qm->db_interval / PAGE_SIZE; dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH + sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT; @@ -2482,8 +2592,10 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) if (qm->ver == QM_HW_V1) qm->ops = &qm_hw_ops_v1; - else + else if (qm->ver == QM_HW_V2) qm->ops = &qm_hw_ops_v2; + else + qm->ops = &qm_hw_ops_v3; pci_set_drvdata(pdev, qm); mutex_init(&qm->mailbox_lock); @@ -2492,13 +2604,23 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) qm->misc_ctl = false; } +static void qm_put_pci_res(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + + if (qm->use_db_isolation) + iounmap(qm->db_io_base); + + iounmap(qm->io_base); + pci_release_mem_regions(pdev); +} + static void hisi_qm_pci_uninit(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; pci_free_irq_vectors(pdev); - iounmap(qm->io_base); - pci_release_mem_regions(pdev); + qm_put_pci_res(qm); pci_disable_device(pdev); } @@ -2527,7 +2649,6 @@ void hisi_qm_uninit(struct hisi_qm *qm) hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); - memset(&qm->qdma, 0, sizeof(qm->qdma)); } qm_irq_unregister(qm); @@ -2681,7 +2802,7 @@ static int __hisi_qm_start(struct hisi_qm *qm) { int ret; - 
WARN_ON(!qm->qdma.dma); + WARN_ON(!qm->qdma.va); if (qm->fun_type == QM_HW_PF) { ret = qm_dev_mem_reset(qm); @@ -2930,9 +3051,11 @@ void hisi_qm_debug_init(struct hisi_qm *qm) qm->debug.qm_d = qm_d; /* only show this in PF */ - if (qm->fun_type == QM_HW_PF) + if (qm->fun_type == QM_HW_PF) { + qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM); for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) - qm_create_debugfs_file(qm, i); + qm_create_debugfs_file(qm, qm_d, i); + } debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); @@ -2960,6 +3083,10 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm) struct qm_dfx_registers *regs; int i; + /* clear current_qm */ + writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); + writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); + /* clear current_q */ writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); @@ -2982,7 +3109,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear); static void qm_hw_error_init(struct hisi_qm *qm) { - const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info; + struct hisi_qm_err_info *err_info = &qm->err_info; if (!qm->ops->hw_error_init) { dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); @@ -3175,30 +3302,46 @@ EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) { - u32 remain_q_num, q_num, i, j; + u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; + u32 max_qp_num = qm->max_qp_num; u32 q_base = qm->qp_num; int ret; if (!num_vfs) return -EINVAL; - remain_q_num = qm->ctrl_qp_num - qm->qp_num; + vfs_q_num = qm->ctrl_qp_num - qm->qp_num; - /* If remain queues not enough, return error. */ - if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs) + /* If vfs_q_num is less than num_vfs, return error. */ + if (vfs_q_num < num_vfs) return -EINVAL; - q_num = remain_q_num / num_vfs; - for (i = 1; i <= num_vfs; i++) { - if (i == num_vfs) - q_num += remain_q_num % num_vfs; - ret = hisi_qm_set_vft(qm, i, q_base, q_num); + q_num = vfs_q_num / num_vfs; + remain_q_num = vfs_q_num % num_vfs; + + for (i = num_vfs; i > 0; i--) { + /* + * if q_num + remain_q_num > max_qp_num in last vf, divide the + * remaining queues equally. 
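+ * For example, vfs_q_num = 11, num_vfs = 3 and max_qp_num = 4 give
+ * q_num = 3 and remain_q_num = 2, so VF3 and VF2 each take four
+ * queues and VF1 takes three.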
+ */ + if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { + act_q_num = q_num + remain_q_num; + remain_q_num = 0; + } else if (remain_q_num > 0) { + act_q_num = q_num + 1; + remain_q_num--; + } else { + act_q_num = q_num; + } + + act_q_num = min_t(int, act_q_num, max_qp_num); + ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); if (ret) { - for (j = i; j > 0; j--) + for (j = num_vfs; j > i; j--) hisi_qm_set_vft(qm, j, 0, 0); return ret; } - q_base += q_num; + q_base += act_q_num; } return 0; @@ -3318,15 +3461,15 @@ static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) /* get device hardware error status */ err_sts = qm->err_ini->get_dev_hw_err_status(qm); if (err_sts) { - if (err_sts & qm->err_ini->err_info.ecc_2bits_mask) + if (err_sts & qm->err_info.ecc_2bits_mask) qm->err_status.is_dev_ecc_mbit = true; if (qm->err_ini->log_dev_hw_err) qm->err_ini->log_dev_hw_err(qm, err_sts); /* ce error does not need to be reset */ - if ((err_sts | qm->err_ini->err_info.dev_ce_mask) == - qm->err_ini->err_info.dev_ce_mask) { + if ((err_sts | qm->err_info.dev_ce_mask) == + qm->err_info.dev_ce_mask) { if (qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, err_sts); @@ -3639,7 +3782,7 @@ static int qm_soft_reset(struct hisi_qm *qm) acpi_status s; s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), - qm->err_ini->err_info.acpi_rst, + qm->err_info.acpi_rst, NULL, &value); if (ACPI_FAILURE(s)) { pci_err(pdev, "NO controller reset method!\n"); @@ -3707,12 +3850,11 @@ static void qm_restart_prepare(struct hisi_qm *qm) /* temporarily close the OOO port used for PEH to write out MSI */ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); - writel(value & ~qm->err_ini->err_info.msi_wr_port, + writel(value & ~qm->err_info.msi_wr_port, qm->io_base + ACC_AM_CFG_PORT_WR_EN); /* clear dev ecc 2bit error source if having */ - value = qm_get_dev_err_status(qm) & - qm->err_ini->err_info.ecc_2bits_mask; + value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; if (value && qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, value); @@ -3736,7 +3878,7 @@ static void qm_restart_done(struct hisi_qm *qm) /* open the OOO port for PEH to write out MSI */ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); - value |= qm->err_ini->err_info.msi_wr_port; + value |= qm->err_info.msi_wr_port; writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); qm->err_status.is_qm_ecc_mbit = false; @@ -3875,8 +4017,7 @@ static int qm_check_dev_error(struct hisi_qm *qm) if (ret) return ret; - return (qm_get_dev_err_status(qm) & - qm->err_ini->err_info.ecc_2bits_mask); + return (qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask); } void hisi_qm_reset_prepare(struct pci_dev *pdev) @@ -4084,7 +4225,7 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) mutex_unlock(&qm_list->lock); if (flag) { - ret = qm_list->register_to_crypto(); + ret = qm_list->register_to_crypto(qm); if (ret) { mutex_lock(&qm_list->lock); list_del(&qm->list); @@ -4115,10 +4256,97 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) mutex_unlock(&qm_list->lock); if (list_empty(&qm_list->list)) - qm_list->unregister_from_crypto(); + qm_list->unregister_from_crypto(qm); } EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); +static int qm_get_qp_num(struct hisi_qm *qm) +{ + if (qm->ver == QM_HW_V1) + qm->ctrl_qp_num = QM_QNUM_V1; + else if (qm->ver == QM_HW_V2) + qm->ctrl_qp_num = QM_QNUM_V2; + else + qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) & + 
QM_QP_NUN_MASK; + + if (qm->use_db_isolation) + qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >> + QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK; + else + qm->max_qp_num = qm->ctrl_qp_num; + + /* check if qp number is valid */ + if (qm->qp_num > qm->max_qp_num) { + dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n", + qm->qp_num, qm->max_qp_num); + return -EINVAL; + } + + return 0; +} + +static int qm_get_pci_res(struct hisi_qm *qm) +{ + struct pci_dev *pdev = qm->pdev; + struct device *dev = &pdev->dev; + int ret; + + ret = pci_request_mem_regions(pdev, qm->dev_name); + if (ret < 0) { + dev_err(dev, "Failed to request mem regions!\n"); + return ret; + } + + qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); + qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); + if (!qm->io_base) { + ret = -EIO; + goto err_request_mem_regions; + } + + if (qm->ver > QM_HW_V2) { + if (qm->fun_type == QM_HW_PF) + qm->use_db_isolation = readl(qm->io_base + + QM_QUE_ISO_EN) & BIT(0); + else + qm->use_db_isolation = readl(qm->io_base + + QM_QUE_ISO_CFG_V) & BIT(0); + } + + if (qm->use_db_isolation) { + qm->db_interval = QM_QP_DB_INTERVAL; + qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); + qm->db_io_base = ioremap(qm->db_phys_base, + pci_resource_len(pdev, PCI_BAR_4)); + if (!qm->db_io_base) { + ret = -EIO; + goto err_ioremap; + } + } else { + qm->db_phys_base = qm->phys_base; + qm->db_io_base = qm->io_base; + qm->db_interval = 0; + } + + if (qm->fun_type == QM_HW_PF) { + ret = qm_get_qp_num(qm); + if (ret) + goto err_db_ioremap; + } + + return 0; + +err_db_ioremap: + if (qm->use_db_isolation) + iounmap(qm->db_io_base); +err_ioremap: + iounmap(qm->io_base); +err_request_mem_regions: + pci_release_mem_regions(pdev); + return ret; +} + static int hisi_qm_pci_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -4132,42 +4360,30 @@ static int hisi_qm_pci_init(struct hisi_qm *qm) return ret; } - ret = pci_request_mem_regions(pdev, qm->dev_name); - if (ret < 0) { - dev_err(dev, "Failed to request mem regions!\n"); + ret = qm_get_pci_res(qm); + if (ret) goto err_disable_pcidev; - } - - qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); - qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2); - qm->io_base = ioremap(qm->phys_base, qm->phys_size); - if (!qm->io_base) { - ret = -EIO; - goto err_release_mem_regions; - } ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret < 0) - goto err_iounmap; + goto err_get_pci_res; pci_set_master(pdev); if (!qm->ops->get_irq_num) { ret = -EOPNOTSUPP; - goto err_iounmap; + goto err_get_pci_res; } num_vec = qm->ops->get_irq_num(qm); ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); if (ret < 0) { dev_err(dev, "Failed to enable MSI vectors!\n"); - goto err_iounmap; + goto err_get_pci_res; } return 0; -err_iounmap: - iounmap(qm->io_base); -err_release_mem_regions: - pci_release_mem_regions(pdev); +err_get_pci_res: + qm_put_pci_res(qm); err_disable_pcidev: pci_disable_device(pdev); return ret; @@ -4187,28 +4403,28 @@ int hisi_qm_init(struct hisi_qm *qm) hisi_qm_pre_init(qm); - ret = qm_alloc_uacce(qm); - if (ret < 0) - dev_warn(dev, "fail to alloc uacce (%d)\n", ret); - ret = hisi_qm_pci_init(qm); if (ret) - goto err_remove_uacce; + return ret; ret = qm_irq_register(qm); if (ret) - goto err_pci_uninit; + goto err_pci_init; if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) { /* v2 starts to support get vft by mailbox */ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); if (ret) - goto 
err_irq_unregister; + goto err_irq_register; } + ret = qm_alloc_uacce(qm); + if (ret < 0) + dev_warn(dev, "fail to alloc uacce (%d)\n", ret); + ret = hisi_qm_memory_init(qm); if (ret) - goto err_irq_unregister; + goto err_alloc_uacce; INIT_WORK(&qm->work, qm_work_process); if (qm->fun_type == QM_HW_PF) @@ -4218,13 +4434,13 @@ int hisi_qm_init(struct hisi_qm *qm) return 0; -err_irq_unregister: - qm_irq_unregister(qm); -err_pci_uninit: - hisi_qm_pci_uninit(qm); -err_remove_uacce: +err_alloc_uacce: uacce_remove(qm->uacce); qm->uacce = NULL; +err_irq_register: + qm_irq_unregister(qm); +err_pci_init: + hisi_qm_pci_uninit(qm); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_init); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 54967c6b9c78..acefdf8b3a50 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -51,14 +51,6 @@ #define PEH_AXUSER_CFG 0x401001 #define PEH_AXUSER_CFG_ENABLE 0xffffffff -#define QM_DFX_MB_CNT_VF 0x104010 -#define QM_DFX_DB_CNT_VF 0x104020 -#define QM_DFX_SQE_CNT_VF_SQN 0x104030 -#define QM_DFX_CQE_CNT_VF_CQN 0x104040 -#define QM_DFX_QN_SHIFT 16 -#define CURRENT_FUN_MASK GENMASK(5, 0) -#define CURRENT_Q_MASK GENMASK(31, 16) - #define QM_AXI_RRESP BIT(0) #define QM_AXI_BRESP BIT(1) #define QM_ECC_MBIT BIT(2) @@ -72,10 +64,13 @@ #define QM_DB_TIMEOUT BIT(10) #define QM_OF_FIFO_OF BIT(11) #define QM_DB_RANDOM_INVALID BIT(12) +#define QM_MAILBOX_TIMEOUT BIT(13) +#define QM_FLR_TIMEOUT BIT(14) #define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ - QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID) + QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \ + QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT) #define QM_BASE_CE QM_ECC_1BIT #define QM_Q_DEPTH 1024 @@ -123,6 +118,7 @@ enum qm_fun_type { }; enum qm_debug_file { + CURRENT_QM, CURRENT_Q, CLEAR_ENABLE, DEBUG_FILE_NUM, @@ -193,14 +189,14 @@ struct hisi_qm_err_ini { void (*open_axi_master_ooo)(struct hisi_qm *qm); void (*close_axi_master_ooo)(struct hisi_qm *qm); void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); - struct hisi_qm_err_info err_info; + void (*err_info_init)(struct hisi_qm *qm); }; struct hisi_qm_list { struct mutex lock; struct list_head list; - int (*register_to_crypto)(void); - void (*unregister_from_crypto)(void); + int (*register_to_crypto)(struct hisi_qm *qm); + void (*unregister_from_crypto)(struct hisi_qm *qm); }; struct hisi_qm { @@ -209,12 +205,15 @@ struct hisi_qm { const char *dev_name; struct pci_dev *pdev; void __iomem *io_base; + void __iomem *db_io_base; u32 sqe_size; u32 qp_base; u32 qp_num; u32 qp_in_used; u32 ctrl_qp_num; + u32 max_qp_num; u32 vfs_num; + u32 db_interval; struct list_head list; struct hisi_qm_list *qm_list; @@ -230,6 +229,7 @@ struct hisi_qm { struct hisi_qm_status status; const struct hisi_qm_err_ini *err_ini; + struct hisi_qm_err_info err_info; struct hisi_qm_err_status err_status; unsigned long misc_ctl; /* driver removing and reset sched */ @@ -252,8 +252,11 @@ struct hisi_qm { const char *algs; bool use_sva; bool is_frozen; + + /* doorbell isolation enable */ + bool use_db_isolation; resource_size_t phys_base; - resource_size_t phys_size; + resource_size_t db_phys_base; struct uacce_device *uacce; int mode; }; diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c index 8ca945ac297e..0a3c8f019b02 100644 --- a/drivers/crypto/hisilicon/sec/sec_algs.c +++ b/drivers/crypto/hisilicon/sec/sec_algs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* 
Copyright (c) 2016-2017 Hisilicon Limited. */ +/* Copyright (c) 2016-2017 HiSilicon Limited. */ #include #include #include diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index 91ee2bb575df..c8de1b51c843 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Driver for the Hisilicon SEC units found on Hip06 Hip07 + * Driver for the HiSilicon SEC units found on Hip06 Hip07 * - * Copyright (c) 2016-2017 Hisilicon Limited. + * Copyright (c) 2016-2017 HiSilicon Limited. */ #include #include @@ -233,7 +233,7 @@ static int sec_queue_map_io(struct sec_queue *queue) IORESOURCE_MEM, 2 + queue->queue_id); if (!res) { - dev_err(dev, "Failed to get queue %d memory resource\n", + dev_err(dev, "Failed to get queue %u memory resource\n", queue->queue_id); return -ENOMEM; } @@ -653,12 +653,12 @@ static int sec_queue_free(struct sec_queue *queue) struct sec_dev_info *info = queue->dev_info; if (queue->queue_id >= SEC_Q_NUM) { - dev_err(info->dev, "No queue %d\n", queue->queue_id); + dev_err(info->dev, "No queue %u\n", queue->queue_id); return -ENODEV; } if (!queue->in_use) { - dev_err(info->dev, "Queue %d is idle\n", queue->queue_id); + dev_err(info->dev, "Queue %u is idle\n", queue->queue_id); return -ENODEV; } @@ -834,6 +834,7 @@ int sec_queue_stop_release(struct sec_queue *queue) /** * sec_queue_empty() - Is this hardware queue currently empty. + * @queue: The queue to test * * We need to know if we have an empty queue for some of the chaining modes * as if it is not empty we may need to hold the message in a software queue @@ -1315,6 +1316,6 @@ static struct platform_driver sec_driver = { module_platform_driver(sec_driver); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Hisilicon Security Accelerators"); +MODULE_DESCRIPTION("HiSilicon Security Accelerators"); MODULE_AUTHOR("Zaibo Xu "); diff --git a/drivers/crypto/hisilicon/sec/sec_drv.h b/drivers/crypto/hisilicon/sec/sec_drv.h index 4d9063a8b10b..179a8250d691 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.h +++ b/drivers/crypto/hisilicon/sec/sec_drv.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2016-2017 Hisilicon Limited. */ +/* Copyright (c) 2016-2017 HiSilicon Limited. 
*/ #ifndef _SEC_DRV_H_ #define _SEC_DRV_H_ diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 08491912afd5..dfdce2f21e65 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -4,8 +4,6 @@ #ifndef __HISI_SEC_V2_H #define __HISI_SEC_V2_H -#include - #include "../qm.h" #include "sec_crypto.h" @@ -50,7 +48,7 @@ struct sec_req { int err_type; int req_id; - int flag; + u32 flag; /* Status of the SEC request */ bool fake_busy; @@ -139,6 +137,7 @@ struct sec_ctx { bool pbuf_supported; struct sec_cipher_ctx c_ctx; struct sec_auth_ctx a_ctx; + struct device *dev; }; enum sec_endian { @@ -148,7 +147,6 @@ enum sec_endian { }; enum sec_debug_file_index { - SEC_CURRENT_QM, SEC_CLEAR_ENABLE, SEC_DEBUG_FILE_NUM, }; @@ -183,6 +181,6 @@ struct sec_dev { void sec_destroy_qps(struct hisi_qp **qps, int qp_num); struct hisi_qp **sec_create_qps(void); -int sec_register_to_crypto(void); -void sec_unregister_from_crypto(void); +int sec_register_to_crypto(struct hisi_qm *qm); +void sec_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 2eaa516b3231..133aede8bf07 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -43,7 +44,6 @@ #define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) #define SEC_SGL_SGE_NR 128 -#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) #define SEC_CIPHER_AUTH 0xfe #define SEC_AUTH_CIPHER 0x1 #define SEC_MAX_MAC_LEN 64 @@ -96,7 +96,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) 0, QM_Q_DEPTH, GFP_ATOMIC); mutex_unlock(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { - dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); + dev_err(req->ctx->dev, "alloc req id fail!\n"); return req_id; } @@ -112,7 +112,7 @@ static void sec_free_req_id(struct sec_req *req) int req_id = req->req_id; if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { - dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); + dev_err(req->ctx->dev, "free request id invalid!\n"); return; } @@ -138,7 +138,7 @@ static int sec_aead_verify(struct sec_req *req) aead_req->cryptlen + aead_req->assoclen - authsize); if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) { - dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n"); + dev_err(req->ctx->dev, "aead verify failure!\n"); return -EBADMSG; } @@ -177,7 +177,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) if (unlikely(req->err_type || done != SEC_SQE_DONE || (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) || (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) { - dev_err(SEC_CTX_DEV(ctx), + dev_err_ratelimited(ctx->dev, "err_type[%d],done[%d],flag[%d]\n", req->err_type, done, flag); err = -EIO; @@ -326,8 +326,8 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) static int sec_alg_resource_alloc(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); struct sec_alg_res *res = qp_ctx->res; + struct device *dev = ctx->dev; int ret; ret = sec_alloc_civ_resource(dev, res); @@ -360,7 +360,7 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx, static void sec_alg_resource_free(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; sec_free_civ_resource(dev, 
qp_ctx->res); @@ -373,7 +373,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx, static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, int qp_ctx_id, int alg_type) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; struct sec_qp_ctx *qp_ctx; struct hisi_qp *qp; int ret = -ENOMEM; @@ -428,7 +428,7 @@ static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, static void sec_release_qp_ctx(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; hisi_qm_stop_qp(qp_ctx->qp); sec_alg_resource_free(ctx, qp_ctx); @@ -452,6 +452,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); ctx->sec = sec; + ctx->dev = &sec->qm.pdev->dev; ctx->hlf_q_num = sec->ctx_q_num >> 1; ctx->pbuf_supported = ctx->sec->iommu_used; @@ -476,11 +477,9 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) err_sec_release_qp_ctx: for (i = i - 1; i >= 0; i--) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); - kfree(ctx->qp_ctx); err_destroy_qps: sec_destroy_qps(ctx->qps, sec->ctx_q_num); - return ret; } @@ -499,7 +498,7 @@ static int sec_cipher_init(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; - c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL); if (!c_ctx->c_key) return -ENOMEM; @@ -512,7 +511,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx) struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key, c_ctx->c_key_dma); } @@ -520,7 +519,7 @@ static int sec_auth_init(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &a_ctx->a_key_dma, GFP_KERNEL); if (!a_ctx->a_key) return -ENOMEM; @@ -533,7 +532,7 @@ static void sec_auth_uninit(struct sec_ctx *ctx) struct sec_auth_ctx *a_ctx = &ctx->a_ctx; memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); } @@ -546,7 +545,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n"); + pr_err("get error skcipher iv size!\n"); return -EINVAL; } @@ -573,10 +572,18 @@ static void sec_skcipher_uninit(struct crypto_skcipher *tfm) sec_ctx_base_uninit(ctx); } -static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx, +static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen, const enum sec_cmode c_mode) { + struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + int ret; + + ret = verify_skcipher_des3_key(tfm, key); + if (ret) + return ret; + switch (keylen) { case SEC_DES3_2KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; @@ -633,12 +640,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; int ret; if 
(c_mode == SEC_CMODE_XTS) { ret = xts_verify_key(tfm, key, keylen); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n"); + dev_err(dev, "xts mode key err!\n"); return ret; } } @@ -648,7 +656,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, switch (c_alg) { case SEC_CALG_3DES: - ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode); + ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode); break; case SEC_CALG_AES: case SEC_CALG_SM4: @@ -659,7 +667,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n"); + dev_err(dev, "set sec key err!\n"); return ret; } @@ -691,7 +699,7 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -701,21 +709,14 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, copy_size = c_req->c_len; pbuf_length = sg_copy_to_buffer(src, sg_nents(src), - qp_ctx->res[req_id].pbuf, - copy_size); - + qp_ctx->res[req_id].pbuf, + copy_size); if (unlikely(pbuf_length != copy_size)) { dev_err(dev, "copy src data to pbuf error!\n"); return -EINVAL; } c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma; - - if (!c_req->c_in_dma) { - dev_err(dev, "fail to set pbuffer address!\n"); - return -ENOMEM; - } - c_req->c_out_dma = c_req->c_in_dma; return 0; @@ -727,7 +728,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -739,7 +740,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf, copy_size); - if (unlikely(pbuf_length != copy_size)) dev_err(dev, "copy pbuf data to dst error!\n"); } @@ -751,7 +751,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, struct sec_aead_req *a_req = &req->aead_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct sec_alg_res *res = &qp_ctx->res[req->req_id]; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int ret; if (req->use_pbuf) { @@ -806,7 +806,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; if (req->use_pbuf) { sec_cipher_pbuf_unmap(ctx, req, dst); @@ -891,6 +891,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_aead_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; struct crypto_authenc_keys keys; int ret; @@ -904,13 +905,13 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n"); + dev_err(dev, "set sec cipher key err!\n"); goto bad_key; } ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n"); + 
dev_err(dev, "set sec auth key err!\n"); goto bad_key; } @@ -1062,7 +1063,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, cryptlen - iv_size); if (unlikely(sz != iv_size)) - dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); + dev_err(req->ctx->dev, "copy output iv error!\n"); } static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, @@ -1160,7 +1161,7 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) ret = sec_skcipher_bd_fill(ctx, req); if (unlikely(ret)) { - dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n"); + dev_err(ctx->dev, "skcipher bd fill is error!\n"); return ret; } @@ -1194,7 +1195,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) a_req->assoclen); if (unlikely(sz != authsize)) { - dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n"); + dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; } } @@ -1259,7 +1260,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) ret = ctx->req_op->bd_send(ctx, req); if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { - dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); + dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); goto err_send_req; } @@ -1325,7 +1326,7 @@ static int sec_aead_init(struct crypto_aead *tfm) ctx->alg_type = SEC_AEAD; ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n"); + dev_err(ctx->dev, "get error aead iv size!\n"); return -EINVAL; } @@ -1374,7 +1375,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); if (IS_ERR(auth_ctx->hash_tfm)) { - dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n"); + dev_err(ctx->dev, "aead alloc shash error!\n"); sec_aead_exit(tfm); return PTR_ERR(auth_ctx->hash_tfm); } @@ -1405,10 +1406,40 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); } + +static int sec_skcipher_cryptlen_ckeck(struct sec_ctx *ctx, + struct sec_req *sreq) +{ + u32 cryptlen = sreq->c_req.sk_req->cryptlen; + struct device *dev = ctx->dev; + u8 c_mode = ctx->c_ctx.c_mode; + int ret = 0; + + switch (c_mode) { + case SEC_CMODE_XTS: + if (unlikely(cryptlen < AES_BLOCK_SIZE)) { + dev_err(dev, "skcipher XTS mode input length error!\n"); + ret = -EINVAL; + } + break; + case SEC_CMODE_ECB: + case SEC_CMODE_CBC: + if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "skcipher AES input length error!\n"); + ret = -EINVAL; + } + break; + default: + ret = -EINVAL; + } + + return ret; +} + static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct skcipher_request *sk_req = sreq->c_req.sk_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!sk_req->src || !sk_req->dst)) { @@ -1429,12 +1460,9 @@ static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) } return 0; } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { - if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "skcipher aes input length error!\n"); - return -EINVAL; - } - return 0; + return sec_skcipher_cryptlen_ckeck(ctx, sreq); } + dev_err(dev, "skcipher algorithm error!\n"); return -EINVAL; @@ -1531,14 +1559,15 @@ 
@@ -1531,14 +1559,15 @@ static struct skcipher_alg sec_skciphers[] = {
 
 static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
-	u8 c_alg = ctx->c_ctx.c_alg;
 	struct aead_request *req = sreq->aead_req.aead_req;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	size_t authsize = crypto_aead_authsize(tfm);
+	struct device *dev = ctx->dev;
+	u8 c_alg = ctx->c_ctx.c_alg;
 
 	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
 		req->assoclen > SEC_MAX_AAD_LEN)) {
-		dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n");
+		dev_err(dev, "aead input param error!\n");
 		return -EINVAL;
 	}
@@ -1550,7 +1579,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 
 	/* Support AES only */
 	if (unlikely(c_alg != SEC_CALG_AES)) {
-		dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n");
+		dev_err(dev, "aead crypto alg error!\n");
 		return -EINVAL;
 	}
 	if (sreq->c_req.encrypt)
@@ -1559,7 +1588,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 		sreq->c_req.c_len = req->cryptlen - authsize;
 
 	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
-		dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n");
+		dev_err(dev, "aead crypto length error!\n");
 		return -EINVAL;
 	}
@@ -1634,7 +1663,7 @@ static struct aead_alg sec_aeads[] = {
 		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
 };
 
-int sec_register_to_crypto(void)
+int sec_register_to_crypto(struct hisi_qm *qm)
 {
 	int ret;
 
@@ -1651,7 +1680,7 @@ int sec_register_to_crypto(void)
 	return ret;
 }
 
-void sec_unregister_from_crypto(void)
+void sec_unregister_from_crypto(struct hisi_qm *qm)
 {
 	crypto_unregister_skciphers(sec_skciphers,
 				    ARRAY_SIZE(sec_skciphers));
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index b2786e17d8fe..9c78edac56a4 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -64,7 +64,6 @@ enum sec_addr_type {
 };
 
 struct sec_sqe_type2 {
-
 	/*
 	 * mac_len: 0~4 bits
 	 * a_key_len: 5~10 bits
@@ -120,7 +119,6 @@ struct sec_sqe_type2 {
 
 	/* c_pad_len_field: 0~1 bits */
 	__le16 c_pad_len_field;
-
 	__le64 long_a_data_len;
 	__le64 a_ivin_addr;
 	__le64 a_key_addr;
@@ -211,6 +209,6 @@ struct sec_sqe {
 	struct sec_sqe_type2 type2;
 };
 
-int sec_register_to_crypto(void);
-void sec_unregister_from_crypto(void);
+int sec_register_to_crypto(struct hisi_qm *qm);
+void sec_unregister_from_crypto(struct hisi_qm *qm);
 #endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index dc68ba76f65e..6f0062d4408c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -19,7 +19,6 @@
 
 #define SEC_VF_NUM			63
 #define SEC_QUEUE_NUM_V1		4096
-#define SEC_QUEUE_NUM_V2		1024
 #define SEC_PF_PCI_DEVICE_ID		0xa255
 #define SEC_VF_PCI_DEVICE_ID		0xa256
@@ -35,18 +34,16 @@
 #define SEC_CTX_Q_NUM_MAX		32
 
 #define SEC_CTRL_CNT_CLR_CE		0x301120
-#define SEC_CTRL_CNT_CLR_CE_BIT	BIT(0)
-#define SEC_ENGINE_PF_CFG_OFF		0x300000
-#define SEC_ACC_COMMON_REG_OFF		0x1000
+#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
 #define SEC_CORE_INT_SOURCE		0x301010
 #define SEC_CORE_INT_MASK		0x301000
 #define SEC_CORE_INT_STATUS		0x301008
 #define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
-#define SEC_ECC_NUM(err)		(((err) >> 16) & 0xFF)
-#define SEC_ECC_ADDR(err)		((err) >> 0)
+#define SEC_ECC_NUM			16
+#define SEC_ECC_MASH			0xFF
 #define SEC_CORE_INT_DISABLE		0x0
-#define SEC_CORE_INT_ENABLE		0x1ff
-#define SEC_CORE_INT_CLEAR		0x1ff
+#define SEC_CORE_INT_ENABLE		0x7c1ff
+#define SEC_CORE_INT_CLEAR		0x7c1ff
 #define
SEC_SAA_ENABLE 0x17f #define SEC_RAS_CE_REG 0x301050 @@ -54,24 +51,24 @@ #define SEC_RAS_NFE_REG 0x301058 #define SEC_RAS_CE_ENB_MSK 0x88 #define SEC_RAS_FE_ENB_MSK 0x0 -#define SEC_RAS_NFE_ENB_MSK 0x177 -#define SEC_RAS_DISABLE 0x0 -#define SEC_MEM_START_INIT_REG 0x0100 -#define SEC_MEM_INIT_DONE_REG 0x0104 +#define SEC_RAS_NFE_ENB_MSK 0x7c177 +#define SEC_RAS_DISABLE 0x0 +#define SEC_MEM_START_INIT_REG 0x301100 +#define SEC_MEM_INIT_DONE_REG 0x301104 -#define SEC_CONTROL_REG 0x0200 +#define SEC_CONTROL_REG 0x301200 #define SEC_TRNG_EN_SHIFT 8 #define SEC_CLK_GATE_ENABLE BIT(3) #define SEC_CLK_GATE_DISABLE (~BIT(3)) #define SEC_AXI_SHUTDOWN_ENABLE BIT(12) #define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF -#define SEC_INTERFACE_USER_CTRL0_REG 0x0220 -#define SEC_INTERFACE_USER_CTRL1_REG 0x0224 -#define SEC_SAA_EN_REG 0x0270 -#define SEC_BD_ERR_CHK_EN_REG0 0x0380 -#define SEC_BD_ERR_CHK_EN_REG1 0x0384 -#define SEC_BD_ERR_CHK_EN_REG3 0x038c +#define SEC_INTERFACE_USER_CTRL0_REG 0x301220 +#define SEC_INTERFACE_USER_CTRL1_REG 0x301224 +#define SEC_SAA_EN_REG 0x301270 +#define SEC_BD_ERR_CHK_EN_REG0 0x301380 +#define SEC_BD_ERR_CHK_EN_REG1 0x301384 +#define SEC_BD_ERR_CHK_EN_REG3 0x30138c #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) @@ -95,9 +92,6 @@ #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 -#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \ - SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF) - struct sec_hw_error { u32 int_msk; const char *msg; @@ -117,20 +111,66 @@ static struct hisi_qm_list sec_devices = { }; static const struct sec_hw_error sec_hw_errors[] = { - {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"}, - {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"}, - {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"}, - {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"}, - {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"}, - {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"}, - {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"}, - {.int_msk = BIT(7), .msg = "sec_bd_err_rint"}, - {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"}, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "sec_axi_rresp_err_rint" + }, + { + .int_msk = BIT(1), + .msg = "sec_axi_bresp_err_rint" + }, + { + .int_msk = BIT(2), + .msg = "sec_ecc_2bit_err_rint" + }, + { + .int_msk = BIT(3), + .msg = "sec_ecc_1bit_err_rint" + }, + { + .int_msk = BIT(4), + .msg = "sec_req_trng_timeout_rint" + }, + { + .int_msk = BIT(5), + .msg = "sec_fsm_hbeat_rint" + }, + { + .int_msk = BIT(6), + .msg = "sec_channel_req_rng_timeout_rint" + }, + { + .int_msk = BIT(7), + .msg = "sec_bd_err_rint" + }, + { + .int_msk = BIT(8), + .msg = "sec_chain_buff_err_rint" + }, + { + .int_msk = BIT(14), + .msg = "sec_no_secure_access" + }, + { + .int_msk = BIT(15), + .msg = "sec_wrapping_key_auth_err" + }, + { + .int_msk = BIT(16), + .msg = "sec_km_key_crc_fail" + }, + { + .int_msk = BIT(17), + .msg = "sec_axi_poison_err" + }, + { + .int_msk = BIT(18), + .msg = "sec_sva_err" + }, + {} }; static const char * const sec_dbg_file_name[] = { - [SEC_CURRENT_QM] = "current_qm", [SEC_CLEAR_ENABLE] = "clear_enable", }; @@ -277,9 +317,7 @@ static u8 sec_get_endian(struct hisi_qm *qm) "cannot access a register in VF!\n"); return SEC_LE; } - reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF + - SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG); - + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); /* BD little endian mode */ if (!(reg & BIT(0))) 
return SEC_LE; @@ -299,13 +337,13 @@ static int sec_engine_init(struct hisi_qm *qm) u32 reg; /* disable clock gate control */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg &= SEC_CLK_GATE_DISABLE; - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); - writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG)); + writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG); - ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG), + ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG, reg, reg & 0x1, SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); if (ret) { @@ -313,40 +351,40 @@ static int sec_engine_init(struct hisi_qm *qm) return ret; } - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg |= (0x1 << SEC_TRNG_EN_SHIFT); - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); - reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); reg |= SEC_USER0_SMMU_NORMAL; - writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); - reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); reg &= SEC_USER1_SMMU_MASK; if (qm->use_sva && qm->ver == QM_HW_V2) reg |= SEC_USER1_SMMU_SVA; else reg |= SEC_USER1_SMMU_NORMAL; - writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); writel(SEC_SINGLE_PORT_MAX_TRANS, qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); - writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG)); + writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); /* Enable sm4 extra mode, as ctr/ecb */ writel_relaxed(SEC_BD_ERR_CHK_EN0, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG0); /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG1); writel_relaxed(SEC_BD_ERR_CHK_EN3, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG3); /* config endian */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg |= sec_get_endian(qm); - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); return 0; } @@ -381,10 +419,6 @@ static void sec_debug_regs_clear(struct hisi_qm *qm) { int i; - /* clear current_qm */ - writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); - writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); - /* clear sec dfx regs */ writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) @@ -406,7 +440,7 @@ static void sec_hw_error_enable(struct hisi_qm *qm) return; } - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); /* clear SEC hw error source if having */ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); @@ -422,14 +456,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm) /* enable SEC block master OOO when m-bit error occur */ val = val | SEC_AXI_SHUTDOWN_ENABLE; - writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val, qm->io_base + SEC_CONTROL_REG); } static void sec_hw_error_disable(struct hisi_qm *qm) { u32 
val; - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); /* disable RAS int */ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); @@ -442,51 +476,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm) /* disable SEC block master OOO when m-bit error occur */ val = val & SEC_AXI_SHUTDOWN_DISABLE; - writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); -} - -static u32 sec_current_qm_read(struct sec_debug_file *file) -{ - struct hisi_qm *qm = file->qm; - - return readl(qm->io_base + QM_DFX_MB_CNT_VF); -} - -static int sec_current_qm_write(struct sec_debug_file *file, u32 val) -{ - struct hisi_qm *qm = file->qm; - u32 vfq_num; - u32 tmp; - - if (val > qm->vfs_num) - return -EINVAL; - - /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ - if (!val) { - qm->debug.curr_qm_qp_num = qm->qp_num; - } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; - - if (val == qm->vfs_num) - qm->debug.curr_qm_qp_num = - qm->ctrl_qp_num - qm->qp_num - - (qm->vfs_num - 1) * vfq_num; - else - qm->debug.curr_qm_qp_num = vfq_num; - } - - writel(val, qm->io_base + QM_DFX_MB_CNT_VF); - writel(val, qm->io_base + QM_DFX_DB_CNT_VF); - - tmp = val | - (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - - tmp = val | - (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - return 0; + writel(val, qm->io_base + SEC_CONTROL_REG); } static u32 sec_clear_enable_read(struct sec_debug_file *file) @@ -523,9 +513,6 @@ static ssize_t sec_debug_read(struct file *filp, char __user *buf, spin_lock_irq(&file->lock); switch (file->index) { - case SEC_CURRENT_QM: - val = sec_current_qm_read(file); - break; case SEC_CLEAR_ENABLE: val = sec_clear_enable_read(file); break; @@ -566,11 +553,6 @@ static ssize_t sec_debug_write(struct file *filp, const char __user *buf, spin_lock_irq(&file->lock); switch (file->index) { - case SEC_CURRENT_QM: - ret = sec_current_qm_write(file, val); - if (ret) - goto err_input; - break; case SEC_CLEAR_ENABLE: ret = sec_clear_enable_write(file, val); if (ret) @@ -655,7 +637,7 @@ static int sec_debug_init(struct hisi_qm *qm) int i; if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) { - for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) { + for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) { spin_lock_init(&sec->debug.files[i].lock); sec->debug.files[i].index = i; sec->debug.files[i].qm = qm; @@ -712,7 +694,8 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) err_val = readl(qm->io_base + SEC_CORE_SRAM_ECC_ERR_INFO); dev_err(dev, "multi ecc sram num=0x%x\n", - SEC_ECC_NUM(err_val)); + ((err_val) >> SEC_ECC_NUM) & + SEC_ECC_MASH); } } errs++; @@ -733,9 +716,23 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm) { u32 val; - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); - writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); - writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); + writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG); + writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG); +} + +static void sec_err_info_init(struct hisi_qm *qm) +{ + struct hisi_qm_err_info *err_info = &qm->err_info; + + err_info->ce = QM_BASE_CE; + err_info->fe = 0; + err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; + err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK; + err_info->msi_wr_port = BIT(0); + 
err_info->acpi_rst = "SRST";
+	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+			QM_ACC_WB_NOT_READY_TIMEOUT;
 }
 
 static const struct hisi_qm_err_ini sec_err_ini = {
@@ -746,16 +743,7 @@ static const struct hisi_qm_err_ini sec_err_ini = {
 	.clear_dev_hw_err_status = sec_clear_hw_err_status,
 	.log_dev_hw_err = sec_log_hw_error,
 	.open_axi_master_ooo = sec_open_axi_master_ooo,
-	.err_info = {
-		.ce = QM_BASE_CE,
-		.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
-		       QM_ACC_WB_NOT_READY_TIMEOUT,
-		.fe = 0,
-		.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC,
-		.dev_ce_mask = SEC_RAS_CE_ENB_MSK,
-		.msi_wr_port = BIT(0),
-		.acpi_rst = "SRST",
-	}
+	.err_info_init = sec_err_info_init,
 };
 
 static int sec_pf_probe_init(struct sec_dev *sec)
@@ -763,12 +751,8 @@ static int sec_pf_probe_init(struct sec_dev *sec)
 	struct hisi_qm *qm = &sec->qm;
 	int ret;
 
-	if (qm->ver == QM_HW_V1)
-		qm->ctrl_qp_num = SEC_QUEUE_NUM_V1;
-	else
-		qm->ctrl_qp_num = SEC_QUEUE_NUM_V2;
-
 	qm->err_ini = &sec_err_ini;
+	qm->err_ini->err_info_init(qm);
 
 	ret = sec_set_user_domain_and_cache(qm);
 	if (ret)
@@ -786,7 +770,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 	qm->pdev = pdev;
 	qm->ver = pdev->revision;
-	qm->algs = "cipher\ndigest\naead\n";
+	qm->algs = "cipher\ndigest\naead";
 	qm->mode = uacce_mode;
 	qm->sqe_size = SEC_SQE_SIZE;
 	qm->dev_name = sec_name;
@@ -909,10 +893,15 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (ret)
 		pci_warn(pdev, "Failed to init debugfs!\n");
 
-	ret = hisi_qm_alg_register(qm, &sec_devices);
-	if (ret < 0) {
-		pr_err("Failed to register driver to crypto.\n");
-		goto err_qm_stop;
+	if (qm->qp_num >= ctx_q_num) {
+		ret = hisi_qm_alg_register(qm, &sec_devices);
+		if (ret < 0) {
+			pr_err("Failed to register driver to crypto.\n");
+			goto err_qm_stop;
+		}
+	} else {
+		pci_warn(qm->pdev,
+			"Failed to use kernel mode, qp not enough!\n");
 	}
 
 	if (qm->uacce) {
@@ -948,7 +937,9 @@ static void sec_remove(struct pci_dev *pdev)
 	struct hisi_qm *qm = pci_get_drvdata(pdev);
 
 	hisi_qm_wait_task_finish(qm, &sec_devices);
-	hisi_qm_alg_unregister(qm, &sec_devices);
+	if (qm->qp_num >= ctx_q_num)
+		hisi_qm_alg_unregister(qm, &sec_devices);
+
 	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
 		hisi_qm_sriov_disable(pdev, true);
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 3bff6394acaf..057273769f26 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -56,7 +56,7 @@ struct hisi_acc_sgl_pool {
 struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
 						   u32 count, u32 sge_nr)
 {
-	u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
+	u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
 	struct hisi_acc_sgl_pool *pool;
 	struct mem_block *block;
 	u32 i, j;
@@ -66,6 +66,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
 
 	sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
 		   sizeof(struct hisi_acc_hw_sgl);
+
+	/*
+	 * The pool may allocate a block of memory of size
+	 * PAGE_SIZE * 2^(MAX_ORDER - 1), which can exceed 2^31 on ia64,
+	 * so cap the block size at 2^31.
+	 */
 	block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
PAGE_SHIFT + MAX_ORDER - 1 : 31); sgl_num_per_block = block_size / sgl_size; @@ -85,8 +90,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, block[i].sgl = dma_alloc_coherent(dev, block_size, &block[i].sgl_dma, GFP_KERNEL); - if (!block[i].sgl) + if (!block[i].sgl) { + dev_err(dev, "Fail to allocate hw SG buffer!\n"); goto err_free_mem; + } block[i].size = block_size; } @@ -95,8 +102,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size, &block[i].sgl_dma, GFP_KERNEL); - if (!block[i].sgl) + if (!block[i].sgl) { + dev_err(dev, "Fail to allocate remained hw SG buffer!\n"); goto err_free_mem; + } block[i].size = remain_sgl * sgl_size; } @@ -167,6 +176,7 @@ static void sg_map_to_hw_sg(struct scatterlist *sgl, { hw_sge->buf = sg_dma_address(sgl); hw_sge->len = cpu_to_le32(sg_dma_len(sgl)); + hw_sge->page_ctrl = sg_virt(sgl); } static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) @@ -182,6 +192,18 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum) hw_sgl->entry_sum_in_chain = cpu_to_le16(sum); } +static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) +{ + struct acc_hw_sge *hw_sge = hw_sgl->sge_entries; + int i; + + for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) { + hw_sge[i].page_ctrl = NULL; + hw_sge[i].buf = 0; + hw_sge[i].len = 0; + } +} + /** * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl. * @dev: The device which hw sgl belongs to. @@ -211,16 +233,19 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, sg_n = sg_nents(sgl); sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); - if (!sg_n_mapped) + if (!sg_n_mapped) { + dev_err(dev, "DMA mapping for SG error!\n"); return ERR_PTR(-EINVAL); + } if (sg_n_mapped > pool->sge_nr) { - dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); + dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n"); return ERR_PTR(-EINVAL); } curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); if (IS_ERR(curr_hw_sgl)) { + dev_err(dev, "Get SGL error!\n"); dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); return ERR_PTR(-ENOMEM); @@ -256,7 +281,7 @@ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, return; dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL); - + clear_hw_sgl_sge(hw_sgl); hw_sgl->entry_sum_in_chain = 0; hw_sgl->entry_sum_in_sgl = 0; hw_sgl->entry_length_in_sgl = 0; diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c index 29712685498a..829f2caf0f67 100644 --- a/drivers/crypto/hisilicon/trng/trng.c +++ b/drivers/crypto/hisilicon/trng/trng.c @@ -18,6 +18,8 @@ #define HISI_TRNG_REG 0x00F0 #define HISI_TRNG_BYTES 4 #define HISI_TRNG_QUALITY 512 +#define HISI_TRNG_VERSION 0x01B8 +#define HISI_TRNG_VER_V1 GENMASK(31, 0) #define SLEEP_US 10 #define TIMEOUT_US 10000 #define SW_DRBG_NUM_SHIFT 2 @@ -50,6 +52,7 @@ struct hisi_trng { struct hisi_trng_list *trng_list; struct list_head list; struct hwrng rng; + u32 ver; bool is_used; struct mutex mutex; }; @@ -260,6 +263,7 @@ static int hisi_trng_probe(struct platform_device *pdev) return PTR_ERR(trng->base); trng->is_used = false; + trng->ver = readl(trng->base + HISI_TRNG_VERSION); if (!trng_devices.is_init) { INIT_LIST_HEAD(&trng_devices.list); mutex_init(&trng_devices.lock); @@ -267,7 +271,8 @@ static int hisi_trng_probe(struct platform_device *pdev) } hisi_trng_add_to_list(trng); - if (atomic_inc_return(&trng_active_devs) == 1) { 
+ if (trng->ver != HISI_TRNG_VER_V1 && + atomic_inc_return(&trng_active_devs) == 1) { ret = crypto_register_rng(&hisi_trng_alg); if (ret) { dev_err(&pdev->dev, @@ -289,7 +294,8 @@ static int hisi_trng_probe(struct platform_device *pdev) return ret; err_crypto_unregister: - if (atomic_dec_return(&trng_active_devs) == 0) + if (trng->ver != HISI_TRNG_VER_V1 && + atomic_dec_return(&trng_active_devs) == 0) crypto_unregister_rng(&hisi_trng_alg); err_remove_from_list: @@ -305,7 +311,8 @@ static int hisi_trng_remove(struct platform_device *pdev) while (hisi_trng_del_from_list(trng)) ; - if (atomic_dec_return(&trng_active_devs) == 0) + if (trng->ver != HISI_TRNG_VER_V1 && + atomic_dec_return(&trng_active_devs) == 0) crypto_unregister_rng(&hisi_trng_alg); return 0; diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 92397f993e23..517fdbdff3ea 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -33,35 +33,55 @@ struct hisi_zip_sqe { u32 consumed; u32 produced; u32 comp_data_length; + /* + * status: 0~7 bits + * rsvd: 8~31 bits + */ u32 dw3; u32 input_data_length; - u32 lba_l; - u32 lba_h; + u32 dw5; + u32 dw6; + /* + * in_sge_data_offset: 0~23 bits + * rsvd: 24~27 bits + * sqe_type: 29~31 bits + */ u32 dw7; + /* + * out_sge_data_offset: 0~23 bits + * rsvd: 24~31 bits + */ u32 dw8; + /* + * request_type: 0~7 bits + * buffer_type: 8~11 bits + * rsvd: 13~31 bits + */ u32 dw9; u32 dw10; - u32 priv_info; + u32 dw11; u32 dw12; - u32 tag; + /* tag: in sqe type 0 */ + u32 dw13; u32 dest_avail_out; - u32 rsvd0; - u32 comp_head_addr_l; - u32 comp_head_addr_h; + u32 dw15; + u32 dw16; + u32 dw17; u32 source_addr_l; u32 source_addr_h; u32 dest_addr_l; u32 dest_addr_h; - u32 stream_ctx_addr_l; - u32 stream_ctx_addr_h; - u32 cipher_key1_addr_l; - u32 cipher_key1_addr_h; - u32 cipher_key2_addr_l; - u32 cipher_key2_addr_h; + u32 dw22; + u32 dw23; + u32 dw24; + u32 dw25; + /* tag: in sqe type 3 */ + u32 dw26; + u32 dw27; u32 rsvd1[4]; }; int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); -int hisi_zip_register_to_crypto(void); -void hisi_zip_unregister_from_crypto(void); +int hisi_zip_register_to_crypto(struct hisi_qm *qm); +void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 08b4660b014c..9520a4113c81 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -10,6 +10,7 @@ #define HZIP_BD_STATUS_M GENMASK(7, 0) /* hisi_zip_sqe dw7 */ #define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0) +#define HZIP_SQE_TYPE_M GENMASK(31, 28) /* hisi_zip_sqe dw8 */ #define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0) /* hisi_zip_sqe dw9 */ @@ -91,8 +92,22 @@ struct hisi_zip_qp_ctx { struct hisi_zip_ctx *ctx; }; +struct hisi_zip_sqe_ops { + u8 sqe_type; + void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); + void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); + void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type); + void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type); + void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); + void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type); + u32 (*get_tag)(struct hisi_zip_sqe *sqe); + u32 (*get_status)(struct hisi_zip_sqe *sqe); + u32 (*get_dstlen)(struct hisi_zip_sqe *sqe); +}; + struct hisi_zip_ctx { struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM]; + const struct 
hisi_zip_sqe_ops *ops; }; static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) @@ -119,104 +134,6 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); -static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) -{ - u32 val; - - val = (sqe->dw9) & ~HZIP_BUF_TYPE_M; - val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type); - sqe->dw9 = val; -} - -static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag) -{ - sqe->tag = tag; -} - -static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type, - dma_addr_t s_addr, dma_addr_t d_addr, u32 slen, - u32 dlen, u32 sskip, u32 dskip) -{ - memset(sqe, 0, sizeof(struct hisi_zip_sqe)); - - sqe->input_data_length = slen - sskip; - sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip); - sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip); - sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type); - sqe->dest_avail_out = dlen - dskip; - sqe->source_addr_l = lower_32_bits(s_addr); - sqe->source_addr_h = upper_32_bits(s_addr); - sqe->dest_addr_l = lower_32_bits(d_addr); - sqe->dest_addr_h = upper_32_bits(d_addr); -} - -static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, - int alg_type, int req_type) -{ - struct device *dev = &qp->qm->pdev->dev; - int ret; - - qp->req_type = req_type; - qp->alg_type = alg_type; - qp->qp_ctx = ctx; - - ret = hisi_qm_start_qp(qp, 0); - if (ret < 0) { - dev_err(dev, "failed to start qp (%d)!\n", ret); - return ret; - } - - ctx->qp = qp; - - return 0; -} - -static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) -{ - hisi_qm_stop_qp(ctx->qp); - hisi_qm_release_qp(ctx->qp); -} - -static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node) -{ - struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; - struct hisi_zip *hisi_zip; - int ret, i, j; - - ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node); - if (ret) { - pr_err("failed to create zip qps (%d)!\n", ret); - return -ENODEV; - } - - hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm); - - for (i = 0; i < HZIP_CTX_Q_NUM; i++) { - /* alg_type = 0 for compress, 1 for decompress in hw sqe */ - ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i, - req_type); - if (ret) { - for (j = i - 1; j >= 0; j--) - hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp); - - hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM); - return ret; - } - - hisi_zip_ctx->qp_ctx[i].zip_dev = hisi_zip; - } - - return 0; -} - -static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) -{ - int i; - - for (i = 1; i >= 0; i--) - hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); -} - static u16 get_extra_field_size(const u8 *start) { return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; @@ -249,6 +166,437 @@ static u32 __get_gzip_head_size(const u8 *src) return size; } +static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl) +{ + char buf[HZIP_GZIP_HEAD_BUF]; + + sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); + + return __get_gzip_head_size(buf); +} + +static int add_comp_head(struct scatterlist *dst, u8 req_type) +{ + int head_size = TO_HEAD_SIZE(req_type); + const u8 *head = TO_HEAD(req_type); + int ret; + + ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); + if (ret != head_size) { + pr_err("the head size of buffer is wrong (%d)!\n", ret); + return -ENOMEM; + } + + return head_size; +} + +static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) +{ + if 
(!acomp_req->src || !acomp_req->slen)
+		return -EINVAL;
+
+	if (req_type == HZIP_ALG_TYPE_GZIP &&
+	    acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)
+		return -EINVAL;
+
+	switch (req_type) {
+	case HZIP_ALG_TYPE_ZLIB:
+		return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
+	case HZIP_ALG_TYPE_GZIP:
+		return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
+	default:
+		pr_err("request type is not supported!\n");
+		return -EINVAL;
+	}
+}
+
+static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
+						struct hisi_zip_qp_ctx *qp_ctx,
+						size_t head_size, bool is_comp)
+{
+	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+	struct hisi_zip_req *q = req_q->q;
+	struct hisi_zip_req *req_cache;
+	int req_id;
+
+	write_lock(&req_q->req_lock);
+
+	req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
+	if (req_id >= req_q->size) {
+		write_unlock(&req_q->req_lock);
+		dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
+		return ERR_PTR(-EAGAIN);
+	}
+	set_bit(req_id, req_q->req_bitmap);
+
+	req_cache = q + req_id;
+	req_cache->req_id = req_id;
+	req_cache->req = req;
+
+	if (is_comp) {
+		req_cache->sskip = 0;
+		req_cache->dskip = head_size;
+	} else {
+		req_cache->sskip = head_size;
+		req_cache->dskip = 0;
+	}
+
+	write_unlock(&req_q->req_lock);
+
+	return req_cache;
+}
+
+static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
+				struct hisi_zip_req *req)
+{
+	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+
+	write_lock(&req_q->req_lock);
+	clear_bit(req->req_id, req_q->req_bitmap);
+	memset(req, 0, sizeof(struct hisi_zip_req));
+	write_unlock(&req_q->req_lock);
+}
+
+static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+	sqe->source_addr_l = lower_32_bits(req->dma_src);
+	sqe->source_addr_h = upper_32_bits(req->dma_src);
+	sqe->dest_addr_l = lower_32_bits(req->dma_dst);
+	sqe->dest_addr_h = upper_32_bits(req->dma_dst);
+}
+
+static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+	struct acomp_req *a_req = req->req;
+
+	sqe->input_data_length = a_req->slen - req->sskip;
+	sqe->dest_avail_out = a_req->dlen - req->dskip;
+	sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip);
+	sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip);
+}
+
+static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
+{
+	u32 val;
+
+	val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
+	val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
+	sqe->dw9 = val;
+}
+
+static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
+{
+	u32 val;
+
+	val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
+	val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
+	sqe->dw9 = val;
+}
+
+static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+	sqe->dw13 = req->req_id;
+}
+
+static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+{
+	sqe->dw26 = req->req_id;
+}
+
+static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
+{
+	u32 val;
+
+	val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
+	val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
+	sqe->dw7 = val;
+}
+
+static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
+			      u8 req_type, struct hisi_zip_req *req)
+{
+	const struct hisi_zip_sqe_ops *ops = ctx->ops;
+
+	memset(sqe, 0, sizeof(struct hisi_zip_sqe));
+
+	ops->fill_addr(sqe, req);
+	ops->fill_buf_size(sqe, req);
+	ops->fill_buf_type(sqe, HZIP_SGL);
+	ops->fill_req_type(sqe, req_type);
+	ops->fill_tag(sqe, req);
+	ops->fill_sqe_type(sqe, ops->sqe_type);
+}
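
hisi_zip_fill_sqe() now touches the descriptor only through ctx->ops, so a single fill path serves both descriptor layouts: v1 hardware keeps the request tag in dw13, v2 keeps it in dw26. A stripped-down sketch of that ops-table dispatch, with hypothetical names (the real ops also cover buffer type, request type, status and output length):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sqe {
	uint32_t dw[32];	/* stand-in for the 32-word hardware descriptor */
};

struct sqe_ops {
	void (*fill_tag)(struct sqe *s, uint32_t tag);
	uint32_t (*get_tag)(const struct sqe *s);
};

/* v1 hardware keeps the request tag in dw13 ... */
static void fill_tag_v1(struct sqe *s, uint32_t tag) { s->dw[13] = tag; }
static uint32_t get_tag_v1(const struct sqe *s) { return s->dw[13]; }

/* ... v2 keeps it in dw26. */
static void fill_tag_v2(struct sqe *s, uint32_t tag) { s->dw[26] = tag; }
static uint32_t get_tag_v2(const struct sqe *s) { return s->dw[26]; }

static const struct sqe_ops ops_v1 = { fill_tag_v1, get_tag_v1 };
static const struct sqe_ops ops_v2 = { fill_tag_v2, get_tag_v2 };

int main(void)
{
	const struct sqe_ops *ops = &ops_v2;	/* chosen once, per device version */
	struct sqe s;

	memset(&s, 0, sizeof(s));
	ops->fill_tag(&s, 42);
	printf("tag read back: %u\n", ops->get_tag(&s));	/* 42 */
	return 0;
}
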
+
+static int hisi_zip_do_work(struct hisi_zip_req *req,
+			    struct hisi_zip_qp_ctx *qp_ctx)
+{
+	struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
+	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+	struct acomp_req *a_req = req->req;
+	struct hisi_qp *qp = qp_ctx->qp;
+	struct device *dev = &qp->qm->pdev->dev;
+	struct hisi_zip_sqe zip_sqe;
+	int ret;
+
+	if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)
+		return -EINVAL;
+
+	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
+						    req->req_id << 1, &req->dma_src);
+	if (IS_ERR(req->hw_src)) {
+		dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
+			PTR_ERR(req->hw_src));
+		return PTR_ERR(req->hw_src);
+	}
+
+	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
+						    (req->req_id << 1) + 1,
+						    &req->dma_dst);
+	if (IS_ERR(req->hw_dst)) {
+		ret = PTR_ERR(req->hw_dst);
+		dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
+			ret);
+		goto err_unmap_input;
+	}
+
+	hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
+
+	/* send command to start a task */
+	atomic64_inc(&dfx->send_cnt);
+	ret = hisi_qp_send(qp, &zip_sqe);
+	if (ret < 0) {
+		atomic64_inc(&dfx->send_busy_cnt);
+		ret = -EAGAIN;
+		dev_dbg_ratelimited(dev, "failed to send request!\n");
+		goto err_unmap_output;
+	}
+
+	return -EINPROGRESS;
+
+err_unmap_output:
+	hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
+err_unmap_input:
+	hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
+	return ret;
+}
+
+static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
+{
+	return sqe->dw13;
+}
+
+static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
+{
+	return sqe->dw26;
+}
+
+static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
+{
+	return sqe->dw3 & HZIP_BD_STATUS_M;
+}
+
+static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
+{
+	return sqe->produced;
+}
+
+static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
+{
+	struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
+	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
+	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
+	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
+	struct device *dev = &qp->qm->pdev->dev;
+	struct hisi_zip_sqe *sqe = data;
+	u32 tag = ops->get_tag(sqe);
+	struct hisi_zip_req *req = req_q->q + tag;
+	struct acomp_req *acomp_req = req->req;
+	u32 status, dlen, head_size;
+	int err = 0;
+
+	atomic64_inc(&dfx->recv_cnt);
+	status = ops->get_status(sqe);
+	if (status != 0 && status != HZIP_NC_ERR) {
+		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
+			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
+			sqe->produced);
+		atomic64_inc(&dfx->err_bd_cnt);
+		err = -EIO;
+	}
+
+	dlen = ops->get_dstlen(sqe);
+
+	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
+	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
+
+	head_size = (qp->alg_type == 0) ?
TO_HEAD_SIZE(qp->req_type) : 0; + acomp_req->dlen = dlen + head_size; + + if (acomp_req->base.complete) + acomp_request_complete(acomp_req, err); + + hisi_zip_remove_req(qp_ctx, req); +} + +static int hisi_zip_acompress(struct acomp_req *acomp_req) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); + struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP]; + struct device *dev = &qp_ctx->qp->qm->pdev->dev; + struct hisi_zip_req *req; + int head_size; + int ret; + + /* let's output compression head now */ + head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); + if (head_size < 0) { + dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", + head_size); + return head_size; + } + + req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = hisi_zip_do_work(req, qp_ctx); + if (ret != -EINPROGRESS) { + dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); + hisi_zip_remove_req(qp_ctx, req); + } + + return ret; +} + +static int hisi_zip_adecompress(struct acomp_req *acomp_req) +{ + struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); + struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP]; + struct device *dev = &qp_ctx->qp->qm->pdev->dev; + struct hisi_zip_req *req; + int head_size, ret; + + head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); + if (head_size < 0) { + dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", + head_size); + return head_size; + } + + req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); + if (IS_ERR(req)) + return PTR_ERR(req); + + ret = hisi_zip_do_work(req, qp_ctx); + if (ret != -EINPROGRESS) { + dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", + ret); + hisi_zip_remove_req(qp_ctx, req); + } + + return ret; +} + +static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, + int alg_type, int req_type) +{ + struct device *dev = &qp->qm->pdev->dev; + int ret; + + qp->req_type = req_type; + qp->alg_type = alg_type; + qp->qp_ctx = ctx; + + ret = hisi_qm_start_qp(qp, 0); + if (ret < 0) { + dev_err(dev, "failed to start qp (%d)!\n", ret); + return ret; + } + + ctx->qp = qp; + + return 0; +} + +static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) +{ + hisi_qm_stop_qp(ctx->qp); + hisi_qm_release_qp(ctx->qp); +} + +static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = { + .sqe_type = 0, + .fill_addr = hisi_zip_fill_addr, + .fill_buf_size = hisi_zip_fill_buf_size, + .fill_buf_type = hisi_zip_fill_buf_type, + .fill_req_type = hisi_zip_fill_req_type, + .fill_tag = hisi_zip_fill_tag_v1, + .fill_sqe_type = hisi_zip_fill_sqe_type, + .get_tag = hisi_zip_get_tag_v1, + .get_status = hisi_zip_get_status, + .get_dstlen = hisi_zip_get_dstlen, +}; + +static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = { + .sqe_type = 0x3, + .fill_addr = hisi_zip_fill_addr, + .fill_buf_size = hisi_zip_fill_buf_size, + .fill_buf_type = hisi_zip_fill_buf_type, + .fill_req_type = hisi_zip_fill_req_type, + .fill_tag = hisi_zip_fill_tag_v2, + .fill_sqe_type = hisi_zip_fill_sqe_type, + .get_tag = hisi_zip_get_tag_v2, + .get_status = hisi_zip_get_status, + .get_dstlen = hisi_zip_get_dstlen, +}; + +static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node) +{ + struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; + struct hisi_zip_qp_ctx *qp_ctx; + struct hisi_zip *hisi_zip; + int ret, i, j; + + ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node); + if (ret) { + pr_err("failed to create zip qps 
(%d)!\n", ret); + return -ENODEV; + } + + hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm); + + for (i = 0; i < HZIP_CTX_Q_NUM; i++) { + /* alg_type = 0 for compress, 1 for decompress in hw sqe */ + qp_ctx = &hisi_zip_ctx->qp_ctx[i]; + qp_ctx->ctx = hisi_zip_ctx; + ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type); + if (ret) { + for (j = i - 1; j >= 0; j--) + hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp); + + hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM); + return ret; + } + + qp_ctx->zip_dev = hisi_zip; + } + + if (hisi_zip->qm.ver < QM_HW_V3) + hisi_zip_ctx->ops = &hisi_zip_ops_v1; + else + hisi_zip_ctx->ops = &hisi_zip_ops_v2; + + return 0; +} + +static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) +{ + int i; + + for (i = 1; i >= 0; i--) + hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); +} + static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) { struct hisi_zip_req_q *req_q; @@ -336,52 +684,6 @@ static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx) ctx->qp_ctx[i].sgl_pool); } -static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, - struct hisi_zip_req *req) -{ - struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - - write_lock(&req_q->req_lock); - clear_bit(req->req_id, req_q->req_bitmap); - memset(req, 0, sizeof(struct hisi_zip_req)); - write_unlock(&req_q->req_lock); -} - -static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) -{ - struct hisi_zip_sqe *sqe = data; - struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; - struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; - struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - struct hisi_zip_req *req = req_q->q + sqe->tag; - struct acomp_req *acomp_req = req->req; - struct device *dev = &qp->qm->pdev->dev; - u32 status, dlen, head_size; - int err = 0; - - atomic64_inc(&dfx->recv_cnt); - status = sqe->dw3 & HZIP_BD_STATUS_M; - if (status != 0 && status != HZIP_NC_ERR) { - dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", - (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, - sqe->produced); - atomic64_inc(&dfx->err_bd_cnt); - err = -EIO; - } - dlen = sqe->produced; - - hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src); - hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst); - - head_size = (qp->alg_type == 0) ? 
TO_HEAD_SIZE(qp->req_type) : 0; - acomp_req->dlen = dlen + head_size; - - if (acomp_req->base.complete) - acomp_request_complete(acomp_req, err); - - hisi_zip_remove_req(qp_ctx, req); -} - static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx, void (*fn)(struct hisi_qp *, void *)) { @@ -439,204 +741,6 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) hisi_zip_ctx_exit(ctx); } -static int add_comp_head(struct scatterlist *dst, u8 req_type) -{ - int head_size = TO_HEAD_SIZE(req_type); - const u8 *head = TO_HEAD(req_type); - int ret; - - ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); - if (ret != head_size) { - pr_err("the head size of buffer is wrong (%d)!\n", ret); - return -ENOMEM; - } - - return head_size; -} - -static size_t __maybe_unused get_gzip_head_size(struct scatterlist *sgl) -{ - char buf[HZIP_GZIP_HEAD_BUF]; - - sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); - - return __get_gzip_head_size(buf); -} - -static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) -{ - if (!acomp_req->src || !acomp_req->slen) - return -EINVAL; - - if ((req_type == HZIP_ALG_TYPE_GZIP) && - (acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)) - return -EINVAL; - - switch (req_type) { - case HZIP_ALG_TYPE_ZLIB: - return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB); - case HZIP_ALG_TYPE_GZIP: - return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP); - default: - pr_err("request type does not support!\n"); - return -EINVAL; - } -} - -static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, - struct hisi_zip_qp_ctx *qp_ctx, - size_t head_size, bool is_comp) -{ - struct hisi_zip_req_q *req_q = &qp_ctx->req_q; - struct hisi_zip_req *q = req_q->q; - struct hisi_zip_req *req_cache; - int req_id; - - write_lock(&req_q->req_lock); - - req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); - if (req_id >= req_q->size) { - write_unlock(&req_q->req_lock); - dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); - return ERR_PTR(-EAGAIN); - } - set_bit(req_id, req_q->req_bitmap); - - req_cache = q + req_id; - req_cache->req_id = req_id; - req_cache->req = req; - - if (is_comp) { - req_cache->sskip = 0; - req_cache->dskip = head_size; - } else { - req_cache->sskip = head_size; - req_cache->dskip = 0; - } - - write_unlock(&req_q->req_lock); - - return req_cache; -} - -static int hisi_zip_do_work(struct hisi_zip_req *req, - struct hisi_zip_qp_ctx *qp_ctx) -{ - struct acomp_req *a_req = req->req; - struct hisi_qp *qp = qp_ctx->qp; - struct device *dev = &qp->qm->pdev->dev; - struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; - struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; - struct hisi_zip_sqe zip_sqe; - dma_addr_t input, output; - int ret; - - if (!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen) - return -EINVAL; - - req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, - req->req_id << 1, &input); - if (IS_ERR(req->hw_src)) { - dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n", - PTR_ERR(req->hw_src)); - return PTR_ERR(req->hw_src); - } - req->dma_src = input; - - req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool, - (req->req_id << 1) + 1, - &output); - if (IS_ERR(req->hw_dst)) { - ret = PTR_ERR(req->hw_dst); - dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n", - ret); - goto err_unmap_input; - } - req->dma_dst = output; - - hisi_zip_fill_sqe(&zip_sqe, qp->req_type, input, output, a_req->slen, - a_req->dlen, req->sskip, req->dskip); - hisi_zip_config_buf_type(&zip_sqe, HZIP_SGL); - 
hisi_zip_config_tag(&zip_sqe, req->req_id); - - /* send command to start a task */ - atomic64_inc(&dfx->send_cnt); - ret = hisi_qp_send(qp, &zip_sqe); - if (ret < 0) { - atomic64_inc(&dfx->send_busy_cnt); - ret = -EAGAIN; - dev_dbg_ratelimited(dev, "failed to send request!\n"); - goto err_unmap_output; - } - - return -EINPROGRESS; - -err_unmap_output: - hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst); -err_unmap_input: - hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src); - return ret; -} - -static int hisi_zip_acompress(struct acomp_req *acomp_req) -{ - struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); - struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP]; - struct device *dev = &qp_ctx->qp->qm->pdev->dev; - struct hisi_zip_req *req; - int head_size; - int ret; - - /* let's output compression head now */ - head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); - if (head_size < 0) { - dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", - head_size); - return head_size; - } - - req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); - if (IS_ERR(req)) - return PTR_ERR(req); - - ret = hisi_zip_do_work(req, qp_ctx); - if (ret != -EINPROGRESS) { - dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); - hisi_zip_remove_req(qp_ctx, req); - } - - return ret; -} - -static int hisi_zip_adecompress(struct acomp_req *acomp_req) -{ - struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); - struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP]; - struct device *dev = &qp_ctx->qp->qm->pdev->dev; - struct hisi_zip_req *req; - int head_size, ret; - - head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); - if (head_size < 0) { - dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", - head_size); - return head_size; - } - - req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); - if (IS_ERR(req)) - return PTR_ERR(req); - - ret = hisi_zip_do_work(req, qp_ctx); - if (ret != -EINPROGRESS) { - dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", - ret); - hisi_zip_remove_req(qp_ctx, req); - } - - return ret; -} - static struct acomp_alg hisi_zip_acomp_zlib = { .init = hisi_zip_acomp_init, .exit = hisi_zip_acomp_exit, @@ -665,7 +769,7 @@ static struct acomp_alg hisi_zip_acomp_gzip = { } }; -int hisi_zip_register_to_crypto(void) +int hisi_zip_register_to_crypto(struct hisi_qm *qm) { int ret; @@ -684,7 +788,7 @@ int hisi_zip_register_to_crypto(void) return ret; } -void hisi_zip_unregister_from_crypto(void) +void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) { crypto_unregister_acomp(&hisi_zip_acomp_gzip); crypto_unregister_acomp(&hisi_zip_acomp_zlib); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 02c445722445..2178b40e9f82 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -18,7 +18,6 @@ #define PCI_DEVICE_ID_ZIP_VF 0xa251 #define HZIP_QUEUE_NUM_V1 4096 -#define HZIP_QUEUE_NUM_V2 1024 #define HZIP_CLOCK_GATE_CTRL 0x301004 #define COMP0_ENABLE BIT(0) @@ -69,10 +68,10 @@ #define HZIP_CORE_INT_RAS_CE_ENABLE 0x1 #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164 #define HZIP_CORE_INT_RAS_FE_ENB 0x301168 -#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE +#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x1FFE #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24 -#define HZIP_CORE_INT_MASK_ALL GENMASK(10, 0) +#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0) #define HZIP_COMP_CORE_NUM 2 #define 
HZIP_DECOMP_CORE_NUM 6 #define HZIP_CORE_NUM (HZIP_COMP_CORE_NUM + \ @@ -134,17 +133,17 @@ static const struct hisi_zip_hw_error zip_hw_error[] = { { .int_msk = BIT(8), .msg = "zip_com_inf_err" }, { .int_msk = BIT(9), .msg = "zip_enc_inf_err" }, { .int_msk = BIT(10), .msg = "zip_pre_out_err" }, + { .int_msk = BIT(11), .msg = "zip_axi_poison_err" }, + { .int_msk = BIT(12), .msg = "zip_sva_err" }, { /* sentinel */ } }; enum ctrl_debug_file_index { - HZIP_CURRENT_QM, HZIP_CLEAR_ENABLE, HZIP_DEBUG_FILE_NUM, }; static const char * const ctrl_debug_file_name[] = { - [HZIP_CURRENT_QM] = "current_qm", [HZIP_CLEAR_ENABLE] = "clear_enable", }; @@ -363,48 +362,6 @@ static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) return &hisi_zip->qm; } -static u32 current_qm_read(struct ctrl_debug_file *file) -{ - struct hisi_qm *qm = file_to_qm(file); - - return readl(qm->io_base + QM_DFX_MB_CNT_VF); -} - -static int current_qm_write(struct ctrl_debug_file *file, u32 val) -{ - struct hisi_qm *qm = file_to_qm(file); - u32 vfq_num; - u32 tmp; - - if (val > qm->vfs_num) - return -EINVAL; - - /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ - if (val == 0) { - qm->debug.curr_qm_qp_num = qm->qp_num; - } else { - vfq_num = (qm->ctrl_qp_num - qm->qp_num) / qm->vfs_num; - if (val == qm->vfs_num) - qm->debug.curr_qm_qp_num = qm->ctrl_qp_num - - qm->qp_num - (qm->vfs_num - 1) * vfq_num; - else - qm->debug.curr_qm_qp_num = vfq_num; - } - - writel(val, qm->io_base + QM_DFX_MB_CNT_VF); - writel(val, qm->io_base + QM_DFX_DB_CNT_VF); - - tmp = val | - (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); - - tmp = val | - (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); - writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); - - return 0; -} - static u32 clear_enable_read(struct ctrl_debug_file *file) { struct hisi_qm *qm = file_to_qm(file); @@ -438,9 +395,6 @@ static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf, spin_lock_irq(&file->lock); switch (file->index) { - case HZIP_CURRENT_QM: - val = current_qm_read(file); - break; case HZIP_CLEAR_ENABLE: val = clear_enable_read(file); break; @@ -478,11 +432,6 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp, spin_lock_irq(&file->lock); switch (file->index) { - case HZIP_CURRENT_QM: - ret = current_qm_write(file, val); - if (ret) - goto err_input; - break; case HZIP_CLEAR_ENABLE: ret = clear_enable_write(file, val); if (ret) @@ -580,7 +529,7 @@ static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm) struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); int i; - for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) { + for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) { spin_lock_init(&zip->ctrl->files[i].lock); zip->ctrl->files[i].ctrl = zip->ctrl; zip->ctrl->files[i].index = i; @@ -627,10 +576,6 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) { int i, j; - /* clear current_qm */ - writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); - writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); - /* enable register read_clear bit */ writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); for (i = 0; i < ARRAY_SIZE(core_offsets); i++) @@ -714,6 +659,22 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) qm->io_base + HZIP_CORE_INT_SET); } +static void hisi_zip_err_info_init(struct hisi_qm *qm) +{ + struct hisi_qm_err_info *err_info = &qm->err_info; + + err_info->ce = QM_BASE_CE; + err_info->fe = 0; + 
err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; + err_info->dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE; + err_info->msi_wr_port = HZIP_WR_PORT; + err_info->acpi_rst = "ZRST"; + err_info->nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT; + + if (qm->ver >= QM_HW_V3) + err_info->nfe |= QM_ACC_DO_TASK_TIMEOUT; +} + static const struct hisi_qm_err_ini hisi_zip_err_ini = { .hw_init = hisi_zip_set_user_domain_and_cache, .hw_err_enable = hisi_zip_hw_error_enable, @@ -723,16 +684,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .log_dev_hw_err = hisi_zip_log_hw_error, .open_axi_master_ooo = hisi_zip_open_axi_master_ooo, .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, - .err_info = { - .ce = QM_BASE_CE, - .nfe = QM_BASE_NFE | - QM_ACC_WB_NOT_READY_TIMEOUT, - .fe = 0, - .ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC, - .dev_ce_mask = HZIP_CORE_INT_RAS_CE_ENABLE, - .msi_wr_port = HZIP_WR_PORT, - .acpi_rst = "ZRST", - } + .err_info_init = hisi_zip_err_info_init, }; static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) @@ -746,13 +698,8 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) hisi_zip->ctrl = ctrl; ctrl->hisi_zip = hisi_zip; - - if (qm->ver == QM_HW_V1) - qm->ctrl_qp_num = HZIP_QUEUE_NUM_V1; - else - qm->ctrl_qp_num = HZIP_QUEUE_NUM_V2; - qm->err_ini = &hisi_zip_err_ini; + qm->err_ini->err_info_init(qm); hisi_zip_set_user_domain_and_cache(qm); hisi_qm_dev_err_init(qm); diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c index e813115d5432..aa4c7b2af3e2 100644 --- a/drivers/crypto/img-hash.c +++ b/drivers/crypto/img-hash.c @@ -963,8 +963,6 @@ static int img_hash_probe(struct platform_device *pdev) hdev->io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdev->io_base)) { err = PTR_ERR(hdev->io_base); - dev_err(dev, "can't ioremap, returned %d\n", err); - goto res_err; } @@ -972,7 +970,6 @@ static int img_hash_probe(struct platform_device *pdev) hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); hdev->cpu_addr = devm_ioremap_resource(dev, hash_res); if (IS_ERR(hdev->cpu_addr)) { - dev_err(dev, "can't ioremap write port\n"); err = PTR_ERR(hdev->cpu_addr); goto res_err; } diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 6364583b88b2..9ff885d50edf 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -688,7 +688,7 @@ static int safexcel_hw_init(struct safexcel_crypto_priv *priv) /* Leave the DSE threads reset state */ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe)); - /* Configure the procesing engine thresholds */ + /* Configure the processing engine thresholds */ writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) | EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi), EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe)); diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 8b0f17fc09fb..0616e369522e 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -265,7 +265,7 @@ static int setup_crypt_desc(void) return 0; } -static spinlock_t desc_lock; +static DEFINE_SPINLOCK(desc_lock); static struct crypt_ctl *get_crypt_desc(void) { int i; @@ -293,7 +293,7 @@ static struct crypt_ctl *get_crypt_desc(void) } } -static spinlock_t emerg_lock; +static DEFINE_SPINLOCK(emerg_lock); static struct crypt_ctl *get_crypt_desc_emerg(void) { int i; @@ -1379,9 +1379,6 @@ static int __init ixp_module_init(void) if (IS_ERR(pdev)) return PTR_ERR(pdev); - spin_lock_init(&desc_lock); - 
spin_lock_init(&emerg_lock); - err = init_ixp_crypto(&pdev->dev); if (err) { platform_device_unregister(pdev); diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c index b6b25d994af3..e2a39fdaf623 100644 --- a/drivers/crypto/keembay/keembay-ocs-aes-core.c +++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c @@ -1623,10 +1623,8 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev) } aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem); - if (IS_ERR(aes_dev->base_reg)) { - dev_err(dev, "Failed to get base address\n"); + if (IS_ERR(aes_dev->base_reg)) return PTR_ERR(aes_dev->base_reg); - } /* Get and request IRQ */ aes_dev->irq = platform_get_irq(pdev, 0); @@ -1649,8 +1647,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev) /* Initialize crypto engine */ aes_dev->engine = crypto_engine_alloc_init(dev, true); - if (!aes_dev->engine) + if (!aes_dev->engine) { + rc = -ENOMEM; goto list_del; + } rc = crypto_engine_start(aes_dev->engine); if (rc) { diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c index c4b97b4160e9..0379dbf32a4c 100644 --- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c @@ -1192,10 +1192,8 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev) } hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem); - if (IS_ERR(hcu_dev->io_base)) { - dev_err(dev, "Could not io-remap mem resource.\n"); + if (IS_ERR(hcu_dev->io_base)) return PTR_ERR(hcu_dev->io_base); - } init_completion(&hcu_dev->irq_done); @@ -1220,8 +1218,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev) /* Initialize crypto engine */ hcu_dev->engine = crypto_engine_alloc_init(dev, 1); - if (!hcu_dev->engine) + if (!hcu_dev->engine) { + rc = -ENOMEM; goto list_del; + } rc = crypto_engine_start(hcu_dev->engine); if (rc) { diff --git a/drivers/crypto/keembay/ocs-hcu.c b/drivers/crypto/keembay/ocs-hcu.c index 81eecacf603a..deb9bd460ee6 100644 --- a/drivers/crypto/keembay/ocs-hcu.c +++ b/drivers/crypto/keembay/ocs-hcu.c @@ -93,7 +93,7 @@ #define OCS_HCU_WAIT_BUSY_TIMEOUT_US 1000000 /** - * struct ocs_hcu_dma_list - An entry in an OCS DMA linked list. + * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list. * @src_addr: Source address of the data. * @src_len: Length of data to be fetched. * @nxt_desc: Next descriptor to fetch. @@ -107,7 +107,7 @@ struct ocs_hcu_dma_entry { }; /** - * struct ocs_dma_list - OCS-specific DMA linked list. + * struct ocs_hcu_dma_list - OCS-specific DMA linked list. * @head: The head of the list (points to the array backing the list). * @tail: The current tail of the list; NULL if the list is empty. * @dma_addr: The DMA address of @head (i.e., the DMA address of the backing @@ -597,7 +597,7 @@ int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo) } /** - * ocs_hcu_digest() - Perform a hashing iteration. + * ocs_hcu_hash_update() - Perform a hashing iteration. * @hcu_dev: The OCS HCU device to use. * @ctx: The OCS HCU hashing context. * @dma_list: The OCS DMA list mapping the input data to process. @@ -632,7 +632,7 @@ int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev, } /** - * ocs_hcu_hash_final() - Update and finalize hash computation. + * ocs_hcu_hash_finup() - Update and finalize hash computation. * @hcu_dev: The OCS HCU device to use. * @ctx: The OCS HCU hashing context. * @dma_list: The OCS DMA list mapping the input data to process. 
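The ixp4xx hunk above swaps runtime spin_lock_init() calls for statically initialized locks, so the locks are valid from the moment the module loads and the init path can no longer race a first user. A minimal sketch of the idiom, using illustrative names (example_lock and example_bump are not from the driver):

    #include <linux/spinlock.h>

    /* Initialized at compile time; no spin_lock_init() needed in init/probe. */
    static DEFINE_SPINLOCK(example_lock);

    static int example_bump(int *counter)
    {
            unsigned long flags;
            int val;

            /* Safe even if this runs before any module init code. */
            spin_lock_irqsave(&example_lock, flags);
            val = ++(*counter);
            spin_unlock_irqrestore(&example_lock, flags);

            return val;
    }

A related point applies to the Keem Bay probe fixes above: crypto_engine_alloc_init() returns NULL rather than an ERR_PTR, so there is no errno to propagate and the error path must set rc = -ENOMEM explicitly before unwinding.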
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h index 3518fac29834..ecedd91a8d85 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h @@ -121,14 +121,14 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev); int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev); -int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, - struct pci_dev *pdev, u64 reg, u64 *val); +int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, + u64 reg, u64 *val, int blkaddr); int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 val); + u64 reg, u64 val, int blkaddr); int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 *val); + u64 reg, u64 *val, int blkaddr); int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 val); + u64 reg, u64 val, int blkaddr); struct otx2_cptlfs_info; int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c index 51cb6404ded7..9074876d38e5 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c @@ -43,7 +43,7 @@ int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev) } int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 *val) + u64 reg, u64 *val, int blkaddr) { struct cpt_rd_wr_reg_msg *reg_msg; @@ -62,12 +62,13 @@ int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, reg_msg->is_write = 0; reg_msg->reg_offset = reg; reg_msg->ret_val = val; + reg_msg->blkaddr = blkaddr; return 0; } int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 val) + u64 reg, u64 val, int blkaddr) { struct cpt_rd_wr_reg_msg *reg_msg; @@ -86,16 +87,17 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, reg_msg->is_write = 1; reg_msg->reg_offset = reg; reg_msg->val = val; + reg_msg->blkaddr = blkaddr; return 0; } int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 *val) + u64 reg, u64 *val, int blkaddr) { int ret; - ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val); + ret = otx2_cpt_add_read_af_reg(mbox, pdev, reg, val, blkaddr); if (ret) return ret; @@ -103,11 +105,11 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, } int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev, - u64 reg, u64 val) + u64 reg, u64 val, int blkaddr) { int ret; - ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val); + ret = otx2_cpt_add_write_af_reg(mbox, pdev, reg, val, blkaddr); if (ret) return ret; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c index 823a4571fd67..34aba1532761 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c @@ -56,7 +56,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri) ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev, CPT_AF_LFX_CTL(lf->slot), - &lf_ctrl.u); + &lf_ctrl.u, lfs->blkaddr); if (ret) return ret; @@ -64,7 +64,7 @@ static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri) ret = 
otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev, CPT_AF_LFX_CTL(lf->slot), - lf_ctrl.u); + lf_ctrl.u, lfs->blkaddr); return ret; } @@ -77,7 +77,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf, ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev, CPT_AF_LFX_CTL(lf->slot), - &lf_ctrl.u); + &lf_ctrl.u, lfs->blkaddr); if (ret) return ret; @@ -85,7 +85,7 @@ static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf, ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev, CPT_AF_LFX_CTL(lf->slot), - lf_ctrl.u); + lf_ctrl.u, lfs->blkaddr); return ret; } diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h index 314e97354100..ab1678fc564d 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h @@ -95,6 +95,7 @@ struct otx2_cptlfs_info { u8 kcrypto_eng_grp_num; /* Kernel crypto engine group number */ u8 kvf_limits; /* Kernel crypto limits */ atomic_t state; /* LF's state. started/reset */ + int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */ }; static inline void otx2_cpt_free_instruction_queues( diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h index 8c899ad531a5..e19af1356f12 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h @@ -51,6 +51,7 @@ struct otx2_cptpf_dev { u8 max_vfs; /* Maximum number of VFs supported by CPT */ u8 enabled_vfs; /* Number of enabled VFs */ u8 kvf_limits; /* Kernel crypto limits */ + bool has_cpt1; }; irqreturn_t otx2_cptpf_afpf_mbox_intr(int irq, void *arg); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index 5277e04badd9..58f47e3ab62e 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -451,19 +451,19 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf) return 0; } -static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) +static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr) { int timeout = 10, ret; u64 reg = 0; ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_BLK_RST, 0x1); + CPT_AF_BLK_RST, 0x1, blkaddr); if (ret) return ret; do { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_BLK_RST, ®); + CPT_AF_BLK_RST, ®, blkaddr); if (ret) return ret; @@ -478,11 +478,35 @@ static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) return ret; } +static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) +{ + int ret = 0; + + if (cptpf->has_cpt1) { + ret = cptx_device_reset(cptpf, BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_device_reset(cptpf, BLKADDR_CPT0); +} + +static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf) +{ + u64 cfg; + + cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0, + RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1)); + if (cfg & BIT_ULL(11)) + cptpf->has_cpt1 = true; +} + static int cptpf_device_init(struct otx2_cptpf_dev *cptpf) { union otx2_cptx_af_constants1 af_cnsts1 = {0}; int ret = 0; + /* check if 'implemented' bit is set for block BLKADDR_CPT1 */ + cptpf_check_block_implemented(cptpf); /* Reset the CPT PF device */ ret = cptpf_device_reset(cptpf); if (ret) @@ -490,7 +514,8 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf) /* Get number of SE, IE and AE engines */ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_CONSTANTS1, &af_cnsts1.u); + 
CPT_AF_CONSTANTS1, &af_cnsts1.u, + BLKADDR_CPT0); if (ret) return ret; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 1dc3ba298139..a531f4c8b441 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -153,16 +153,16 @@ static int get_ucode_type(struct device *dev, } static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng, - dma_addr_t dma_addr) + dma_addr_t dma_addr, int blkaddr) { return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, CPT_AF_EXEX_UCODE_BASE(eng), - (u64)dma_addr); + (u64)dma_addr, blkaddr); } -static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) +static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, + struct otx2_cptpf_dev *cptpf, int blkaddr) { - struct otx2_cptpf_dev *cptpf = obj; struct otx2_cpt_engs_rsvd *engs; dma_addr_t dma_addr; int i, bit, ret; @@ -170,7 +170,7 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) /* Set PF number for microcode fetches */ ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, CPT_AF_PF_FUNC, - cptpf->pf_id << RVU_PFVF_PF_SHIFT); + cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr); if (ret) return ret; @@ -187,7 +187,8 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) */ for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num) if (!eng_grp->g->eng_ref_cnt[bit]) { - ret = __write_ucode_base(cptpf, bit, dma_addr); + ret = __write_ucode_base(cptpf, bit, dma_addr, + blkaddr); if (ret) return ret; } @@ -195,23 +196,32 @@ static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) return 0; } -static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, - void *obj) +static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj) { struct otx2_cptpf_dev *cptpf = obj; - struct otx2_cpt_bitmap bmap; + int ret; + + if (cptpf->has_cpt1) { + ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0); +} + +static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, + struct otx2_cptpf_dev *cptpf, + struct otx2_cpt_bitmap bmap, + int blkaddr) +{ int i, timeout = 10; int busy, ret; u64 reg = 0; - bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); - if (!bmap.size) - return -EINVAL; - /* Detach the cores from group */ for_each_set_bit(i, bmap.bits, bmap.size) { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), ®); + CPT_AF_EXEX_CTL2(i), ®, blkaddr); if (ret) return ret; @@ -221,7 +231,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), reg); + CPT_AF_EXEX_CTL2(i), reg, + blkaddr); if (ret) return ret; } @@ -237,7 +248,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, for_each_set_bit(i, bmap.bits, bmap.size) { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_STS(i), ®); + CPT_AF_EXEX_STS(i), ®, + blkaddr); if (ret) return ret; @@ -253,7 +265,8 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, if (!eng_grp->g->eng_ref_cnt[i]) { ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL(i), 0x0); + CPT_AF_EXEX_CTL(i), 0x0, + blkaddr); if (ret) return ret; } @@ -262,22 
+275,39 @@ static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, return 0; } -static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, - void *obj) +static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp, + void *obj) { struct otx2_cptpf_dev *cptpf = obj; struct otx2_cpt_bitmap bmap; - u64 reg = 0; - int i, ret; + int ret; bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); if (!bmap.size) return -EINVAL; + if (cptpf->has_cpt1) { + ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap, + BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap, + BLKADDR_CPT0); +} + +static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, + struct otx2_cptpf_dev *cptpf, + struct otx2_cpt_bitmap bmap, + int blkaddr) +{ + u64 reg = 0; + int i, ret; + /* Attach the cores to the group */ for_each_set_bit(i, bmap.bits, bmap.size) { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), ®); + CPT_AF_EXEX_CTL2(i), ®, blkaddr); if (ret) return ret; @@ -287,7 +317,8 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), reg); + CPT_AF_EXEX_CTL2(i), reg, + blkaddr); if (ret) return ret; } @@ -295,15 +326,33 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, /* Enable the cores */ for_each_set_bit(i, bmap.bits, bmap.size) { - ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, - cptpf->pdev, - CPT_AF_EXEX_CTL(i), 0x1); + ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, + CPT_AF_EXEX_CTL(i), 0x1, + blkaddr); if (ret) return ret; } - ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); + return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); +} - return ret; +static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, + void *obj) +{ + struct otx2_cptpf_dev *cptpf = obj; + struct otx2_cpt_bitmap bmap; + int ret; + + bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp); + if (!bmap.size) + return -EINVAL; + + if (cptpf->has_cpt1) { + ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, + BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0); } static int load_fw(struct device *dev, struct fw_info_t *fw_info, @@ -1140,20 +1189,18 @@ int otx2_cpt_create_eng_grps(struct pci_dev *pdev, return ret; } -int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) +static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores, + int blkaddr) { - int i, ret, busy, total_cores; - int timeout = 10; - u64 reg = 0; - - total_cores = cptpf->eng_grps.avail.max_se_cnt + - cptpf->eng_grps.avail.max_ie_cnt + - cptpf->eng_grps.avail.max_ae_cnt; + int timeout = 10, ret; + int i, busy; + u64 reg; /* Disengage the cores from groups */ for (i = 0; i < total_cores; i++) { ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL2(i), 0x0); + CPT_AF_EXEX_CTL2(i), 0x0, + blkaddr); if (ret) return ret; @@ -1173,7 +1220,8 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) for (i = 0; i < total_cores; i++) { ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_STS(i), ®); + CPT_AF_EXEX_STS(i), ®, + blkaddr); if (ret) return ret; @@ -1187,13 +1235,30 @@ int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) /* Disable the cores */ for (i = 0; 
i < total_cores; i++) { ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_EXEX_CTL(i), 0x0); + CPT_AF_EXEX_CTL(i), 0x0, + blkaddr); if (ret) return ret; } return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev); } +int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf) +{ + int total_cores, ret; + + total_cores = cptpf->eng_grps.avail.max_se_cnt + + cptpf->eng_grps.avail.max_ie_cnt + + cptpf->eng_grps.avail.max_ae_cnt; + + if (cptpf->has_cpt1) { + ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1); + if (ret) + return ret; + } + return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0); +} + void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev, struct otx2_cpt_eng_grps *eng_grps) { @@ -1354,6 +1419,7 @@ int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf) lfs->pdev = pdev; lfs->reg_base = cptpf->reg_base; lfs->mbox = &cptpf->afpf_mbox; + lfs->blkaddr = BLKADDR_CPT0; ret = otx2_cptlf_init(&cptpf->lfs, OTX2_CPT_ALL_ENG_GRPS_MASK, OTX2_CPT_QUEUE_HI_PRIO, 1); if (ret) diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c index 92e921eceed7..d6314ea9ae89 100644 --- a/drivers/crypto/nx/nx-aes-cbc.c +++ b/drivers/crypto/nx/nx-aes-cbc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CBC routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 4c9362eebefd..e7384d107573 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CCM routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c index 6d5ce1a66f1e..13f518802343 100644 --- a/drivers/crypto/nx/nx-aes-ctr.c +++ b/drivers/crypto/nx/nx-aes-ctr.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CTR routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c index 77e338dc33f1..7a729dc2bc17 100644 --- a/drivers/crypto/nx/nx-aes-ecb.c +++ b/drivers/crypto/nx/nx-aes-ecb.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES ECB routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 19c6ed5baea4..fc9baca13920 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES GCM routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c index 48dc1c98ca52..eb5c8f689360 100644 --- a/drivers/crypto/nx/nx-aes-xcbc.c +++ b/drivers/crypto/nx/nx-aes-xcbc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES XCBC routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. 
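The nx header-comment changes above demote '/**' openers to '/*' because the double-star form is reserved for kernel-doc, and these are plain file banners with no kernel-doc body. A short illustrative contrast (example_add() is a hypothetical function, not part of the driver):

    /*
     * Plain comment: fine for file headers and license banners;
     * ignored by scripts/kernel-doc.
     */

    /**
     * example_add() - add two integers
     * @a: first operand
     * @b: second operand
     *
     * The double-star opener marks this block for kernel-doc, so it must
     * follow the kernel-doc layout: name line, parameters, then sections.
     *
     * Return: the sum of @a and @b.
     */
    static inline int example_add(int a, int b)
    {
            return a + b;
    }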
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index 13c65deda8e9..446f611726df 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -932,8 +932,10 @@ static int __init nx_powernv_probe_vas(struct device_node *pn) ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip); - if (ret) + if (ret) { + of_node_put(dn); return ret; + } } if (!ct_842 || !ct_gzip) { diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c index 90d9a37a57f6..b0ad665e4bda 100644 --- a/drivers/crypto/nx/nx-sha256.c +++ b/drivers/crypto/nx/nx-sha256.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * SHA-256 routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c index eb8627a0f317..c29103a1a0b6 100644 --- a/drivers/crypto/nx/nx-sha512.c +++ b/drivers/crypto/nx/nx-sha512.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * SHA-512 routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 1d0e8a1ba160..010e87d9da36 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. @@ -200,7 +200,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, * @sg: sg list head * @end: sg lisg end * @delta: is the amount we need to crop in order to bound the list. - * + * @nbytes: length of data in the scatterlists or data length - whichever + * is greater. */ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, diff --git a/drivers/crypto/nx/nx_debugfs.c b/drivers/crypto/nx/nx_debugfs.c index 1975bcbee997..ee7cd88bb10a 100644 --- a/drivers/crypto/nx/nx_debugfs.c +++ b/drivers/crypto/nx/nx_debugfs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * debugfs routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. 
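The nx-common-powernv fix above plugs a device-node refcount leak: nodes handed out while iterating the device tree carry a reference that must be dropped with of_node_put() on every early exit. A hedged sketch of the same pattern, with hypothetical names (example_validate() and the compatible string are illustrative only):

    #include <linux/errno.h>
    #include <linux/of.h>

    static int example_validate(struct device_node *dn)
    {
            /* Stand-in check; real code would inspect the properties it needs. */
            return of_property_read_bool(dn, "example,enabled") ? 0 : -ENODEV;
    }

    static int example_scan(void)
    {
            struct device_node *dn;
            int ret;

            for_each_compatible_node(dn, NULL, "example,accelerator") {
                    ret = example_validate(dn);
                    if (ret) {
                            /* The iterator still holds a reference on dn;
                             * drop it before bailing out or it leaks.
                             */
                            of_node_put(dn);
                            return ret;
                    }
            }

            return 0;
    }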
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index a45bdcf3026d..0dd4c6b157de 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -103,9 +103,8 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) dd->err = 0; } - err = pm_runtime_get_sync(dd->dev); + err = pm_runtime_resume_and_get(dd->dev); if (err < 0) { - pm_runtime_put_noidle(dd->dev); dev_err(dd->dev, "failed to get sync: %d\n", err); return err; } @@ -1134,7 +1133,7 @@ static int omap_aes_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY); pm_runtime_enable(dev); - err = pm_runtime_get_sync(dev); + err = pm_runtime_resume_and_get(dev); if (err < 0) { dev_err(dev, "%s: failed to get_sync(%d)\n", __func__, err); @@ -1303,7 +1302,7 @@ static int omap_aes_suspend(struct device *dev) static int omap_aes_resume(struct device *dev) { - pm_runtime_get_sync(dev); + pm_runtime_resume_and_get(dev); return 0; } #endif diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c index 6a9be01fdf33..3524ddd48930 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -224,6 +224,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data) hw_data->uof_get_name = uof_get_name; hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); } diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c index f5990d042c9a..1dd64af22bea 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -212,6 +212,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); } diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c index 1d1532e8fb6d..067ca5e17d38 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_err_free_reg; - set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); - ret = adf_dev_init(accel_dev); if (ret) goto out_err_dev_shutdown; + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status); + ret = adf_dev_start(accel_dev); if (ret) goto out_err_dev_stop; diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c index cadcf12884c8..30337390513c 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -214,6 +214,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms; hw_data->reset_device = adf_reset_flr; hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION; + hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); } diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index 04742a6d91ca..51ea88c0b17d 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ 
b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto out_err_free_reg;
 
-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
 	ret = adf_dev_init(accel_dev);
 	if (ret)
 		goto out_err_dev_shutdown;
 
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
 	ret = adf_dev_start(accel_dev);
 	if (ret)
 		goto out_err_dev_stop;
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5527344546e5..ac435b44f1d2 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -173,6 +173,7 @@ struct adf_hw_device_data {
 	void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
 				      bool enable);
 	void (*enable_ints)(struct adf_accel_dev *accel_dev);
+	void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
 	int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
 	void (*reset_device)(struct adf_accel_dev *accel_dev);
 	void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 1aa17303838d..9e560c7d4163 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -179,3 +179,28 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
 	return capabilities;
 }
 EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
+
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+	u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+	unsigned long accel_mask = hw_data->accel_mask;
+	void __iomem *pmisc_addr;
+	struct adf_bar *pmisc;
+	int pmisc_id;
+	u32 i = 0;
+
+	pmisc_id = hw_data->get_misc_bar_id(hw_data);
+	pmisc = &GET_BARS(accel_dev)[pmisc_id];
+	pmisc_addr = pmisc->virt_addr;
+
+	/* Configure WDT timers */
+	for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+		/* Enable WDT for sym and dc */
+		ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
+		/* Enable WDT for pke */
+		ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
+	}
+}
+EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index 3816e6500352..756b0ddfac5e 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -113,11 +113,24 @@ do { \
 /* Power gating */
 #define ADF_POWERGATE_PKE		BIT(24)
 
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products but this
+ * value should be a few milliseconds.
+ */
+#define ADF_SSM_WDT_DEFAULT_VALUE	0x200000
+#define ADF_SSM_WDT_PKE_DEFAULT_VALUE	0x2000000
+#define ADF_SSMWDT_OFFSET		0x54
+#define ADF_SSMWDTPKE_OFFSET		0x58
+#define ADF_SSMWDT(i)		(ADF_SSMWDT_OFFSET + ((i) * 0x4000))
+#define ADF_SSMWDTPKE(i)	(ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+
 void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
			    int num_a_regs, int num_b_regs);
 void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
 void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info);
 void adf_gen2_get_arb_info(struct arb_info *arb_info);
 u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev);
+void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
 
 #endif
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
index b72ff58e0bc7..000528327b29 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.c
@@ -99,3 +99,43 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
 	csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
 }
 EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+
+static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
+					       u32 *lower)
+{
+	*lower = lower_32_bits(value);
+	*upper = upper_32_bits(value);
+}
+
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
+	u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
+	u32 ssm_wdt_pke_high = 0;
+	u32 ssm_wdt_pke_low = 0;
+	u32 ssm_wdt_high = 0;
+	u32 ssm_wdt_low = 0;
+	void __iomem *pmisc_addr;
+	struct adf_bar *pmisc;
+	int pmisc_id;
+
+	pmisc_id = hw_data->get_misc_bar_id(hw_data);
+	pmisc = &GET_BARS(accel_dev)[pmisc_id];
+	pmisc_addr = pmisc->virt_addr;
+
+	/* Convert the 64-bit WDT timer values into 32-bit halves for
+	 * MMIO writes to the 32-bit CSRs.
+	 */
+	adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
+	adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
+				    &ssm_wdt_pke_low);
+
+	/* Enable WDT for sym and dc */
+	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
+	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
+	/* Enable WDT for pke */
+	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
+	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
diff --git a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
index 8ab62b2ac311..b8fca1ff7aab 100644
--- a/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen4_hw_data.h
@@ -94,6 +94,18 @@ do { \
 	ADF_RING_BUNDLE_SIZE * (bank) + \
 	ADF_RING_CSR_RING_SRV_ARB_EN, (value))
 
-void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
+/* WDT timers
+ *
+ * Timeout is in cycles. Clock speed may vary across products but this
+ * value should be a few milliseconds.
+ */ +#define ADF_SSM_WDT_DEFAULT_VALUE 0x200000 +#define ADF_SSM_WDT_PKE_DEFAULT_VALUE 0x8000000 +#define ADF_SSMWDTL_OFFSET 0x54 +#define ADF_SSMWDTH_OFFSET 0x5C +#define ADF_SSMWDTPKEL_OFFSET 0x58 +#define ADF_SSMWDTPKEH_OFFSET 0x60 +void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); #endif diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c index 42029153408e..744c40351428 100644 --- a/drivers/crypto/qat/qat_common/adf_init.c +++ b/drivers/crypto/qat/qat_common/adf_init.c @@ -162,6 +162,10 @@ int adf_dev_start(struct adf_accel_dev *accel_dev) return -EFAULT; } + /* Set ssm watch dog timer */ + if (hw_data->set_ssm_wdtimer) + hw_data->set_ssm_wdtimer(accel_dev); + list_for_each(list_itr, &service_table) { service = list_entry(list_itr, struct service_hndl, list); if (service->event_hld(accel_dev, ADF_EVENT_START)) { diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index c45853463530..e3ad5587be49 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -291,19 +291,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev) ret = adf_isr_alloc_msix_entry_table(accel_dev); if (ret) - return ret; - if (adf_enable_msix(accel_dev)) goto err_out; - if (adf_setup_bh(accel_dev)) - goto err_out; + ret = adf_enable_msix(accel_dev); + if (ret) + goto err_free_msix_table; - if (adf_request_irqs(accel_dev)) - goto err_out; + ret = adf_setup_bh(accel_dev); + if (ret) + goto err_disable_msix; + + ret = adf_request_irqs(accel_dev); + if (ret) + goto err_cleanup_bh; return 0; + +err_cleanup_bh: + adf_cleanup_bh(accel_dev); + +err_disable_msix: + adf_disable_msix(&accel_dev->accel_pci_dev); + +err_free_msix_table: + adf_isr_free_msix_entry_table(accel_dev); + err_out: - adf_isr_resource_free(accel_dev); - return -EFAULT; + return ret; } EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 8b090b7ae8c6..a1b77bd7a894 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -169,7 +169,7 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr) * @msg: Message to send * @vf_nr: VF number to which the message will be sent * - * Function sends a messge from the PF to a VF + * Function sends a message from the PF to a VF * * Return: 0 on success, error code otherwise. */ diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 888c1e047295..8ba28409fb74 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -172,6 +172,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring) dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n"); dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes, ring->base_addr, ring->dma_addr); + ring->base_addr = NULL; return -EFAULT; } diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index 2c98fb63f7b7..e85bd62d134a 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -8,7 +8,7 @@ * adf_vf2pf_init() - send init msg to PF * @accel_dev: Pointer to acceleration VF device. 
* - * Function sends an init messge from the VF to a PF + * Function sends an init message from the VF to a PF * * Return: 0 on success, error code otherwise. */ @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_init); * adf_vf2pf_shutdown() - send shutdown msg to PF * @accel_dev: Pointer to acceleration VF device. * - * Function sends a shutdown messge from the VF to a PF + * Function sends a shutdown message from the VF to a PF * * Return: void */ diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 38d316a42ba6..888388acb6bd 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -261,17 +261,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev) goto err_out; if (adf_setup_pf2vf_bh(accel_dev)) - goto err_out; + goto err_disable_msi; if (adf_setup_bh(accel_dev)) - goto err_out; + goto err_cleanup_pf2vf_bh; if (adf_request_msi_irq(accel_dev)) - goto err_out; + goto err_cleanup_bh; return 0; + +err_cleanup_bh: + adf_cleanup_bh(accel_dev); + +err_cleanup_pf2vf_bh: + adf_cleanup_pf2vf_bh(accel_dev); + +err_disable_msi: + adf_disable_msi(accel_dev); + err_out: - adf_vf_isr_resource_free(accel_dev); return -EFAULT; } EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index ff78c73c47e3..f998ed58457c 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, int n = sg_nents(sgl); struct qat_alg_buf_list *bufl; struct qat_alg_buf_list *buflout = NULL; - dma_addr_t blp; - dma_addr_t bloutp = 0; + dma_addr_t blp = DMA_MAPPING_ERROR; + dma_addr_t bloutp = DMA_MAPPING_ERROR; struct scatterlist *sg; size_t sz_out, sz = struct_size(bufl, bufers, n + 1); @@ -731,9 +731,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, if (unlikely(!bufl)) return -ENOMEM; - blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, blp))) - goto err_in; + for_each_sg(sgl, sg, n, i) + bufl->bufers[i].addr = DMA_MAPPING_ERROR; for_each_sg(sgl, sg, n, i) { int y = sg_nctr; @@ -750,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, sg_nctr++; } bufl->num_bufs = sg_nctr; + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, blp))) + goto err_in; qat_req->buf.bl = bufl; qat_req->buf.blp = blp; qat_req->buf.sz = sz; @@ -764,10 +766,11 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, dev_to_node(&GET_DEV(inst->accel_dev))); if (unlikely(!buflout)) goto err_in; - bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, bloutp))) - goto err_out; + bufers = buflout->bufers; + for_each_sg(sglout, sg, n, i) + bufers[i].addr = DMA_MAPPING_ERROR; + for_each_sg(sglout, sg, n, i) { int y = sg_nctr; @@ -784,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, } buflout->num_bufs = sg_nctr; buflout->num_mapped_bufs = sg_nctr; + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, bloutp))) + goto err_out; qat_req->buf.blout = buflout; qat_req->buf.bloutp = bloutp; qat_req->buf.sz_out = sz_out; @@ -795,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, return 0; err_out: + if (!dma_mapping_error(dev, bloutp)) + dma_unmap_single(dev, 
bloutp, sz_out, DMA_TO_DEVICE);
+
 	n = sg_nents(sglout);
 	for (i = 0; i < n; i++)
 		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
 			dma_unmap_single(dev, buflout->bufers[i].addr,
 					 buflout->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
-	if (!dma_mapping_error(dev, bloutp))
-		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
 	kfree(buflout);
 
 err_in:
+	if (!dma_mapping_error(dev, blp))
+		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+
 	n = sg_nents(sgl);
 	for (i = 0; i < n; i++)
 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
@@ -813,8 +823,6 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 				 bufl->bufers[i].len,
 				 DMA_BIDIRECTIONAL);
 
-	if (!dma_mapping_error(dev, blp))
-		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
 	kfree(bufl);
 
 	dev_err(dev, "Failed to map buf for dma\n");
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index c972554a755e..29999da716cc 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto out_err_free_reg;
 
-	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
-
 	ret = adf_dev_init(accel_dev);
 	if (ret)
 		goto out_err_dev_shutdown;
 
+	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
 	ret = adf_dev_start(accel_dev);
 	if (ret)
 		goto out_err_dev_stop;
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index cffa9fc628ff..850f257d00f3 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -40,7 +40,6 @@ struct qce_cipher_reqctx {
 	struct scatterlist result_sg;
 	struct sg_table dst_tbl;
 	struct scatterlist *dst_sg;
-	struct sg_table src_tbl;
 	struct scatterlist *src_sg;
 	unsigned int cryptlen;
 	struct skcipher_request fallback_req;	// keep at the end
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
index a73db2a5637f..dceb9579d87a 100644
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
 	return cfg;
 }
 
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
-				u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
 	struct ahash_request *req = ahash_request_cast(async_req);
 	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -295,19 +294,18 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
 {
 	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
 	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
-	unsigned int xtsdusize;
 
 	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
 			       enckeylen / 2);
 	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
 
-	/* xts du size 512B */
-	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
-	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+	/* Set the data unit size to cryptlen. Anything else causes
+	 * the crypto engine to return incorrect results.
+ */ + qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen); } -static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, - u32 totallen, u32 offset) +static int qce_setup_regs_skcipher(struct crypto_async_request *async_req) { struct skcipher_request *req = skcipher_request_cast(async_req); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); @@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg); qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen); - qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff); + qce_write(qce, REG_ENCR_SEG_START, 0); if (IS_CTR(flags)) { qce_write(qce, REG_CNTR_MASK, ~0); @@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, qce_write(qce, REG_CNTR_MASK2, ~0); } - qce_write(qce, REG_SEG_SIZE, totallen); + qce_write(qce, REG_SEG_SIZE, rctx->cryptlen); /* get little endianness */ config = qce_config_reg(qce, 1); @@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, } #endif -int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, - u32 offset) +int qce_start(struct crypto_async_request *async_req, u32 type) { switch (type) { #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER case CRYPTO_ALG_TYPE_SKCIPHER: - return qce_setup_regs_skcipher(async_req, totallen, offset); + return qce_setup_regs_skcipher(async_req); #endif #ifdef CONFIG_CRYPTO_DEV_QCE_SHA case CRYPTO_ALG_TYPE_AHASH: - return qce_setup_regs_ahash(async_req, totallen, offset); + return qce_setup_regs_ahash(async_req); #endif default: return -EINVAL; diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h index 85ba16418a04..3bc244bcca2d 100644 --- a/drivers/crypto/qce/common.h +++ b/drivers/crypto/qce/common.h @@ -94,7 +94,6 @@ struct qce_alg_template { void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); int qce_check_status(struct qce_device *qce, u32 *status); void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step); -int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, - u32 offset); +int qce_start(struct crypto_async_request *async_req, u32 type); #endif /* _COMMON_H_ */ diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 61c418c12345..8e6fcf2c21cc 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -12,9 +12,15 @@ #include "core.h" #include "sha.h" -/* crypto hw padding constant for first operation */ -#define SHA_PADDING 64 -#define SHA_PADDING_MASK (SHA_PADDING - 1) +struct qce_sha_saved_state { + u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE]; + u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE]; + __be32 byte_count[2]; + unsigned int pending_buflen; + unsigned int flags; + u64 count; + bool first_blk; +}; static LIST_HEAD(ahash_algs); @@ -107,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) qce_dma_issue_pending(&qce->dma); - ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0); + ret = qce_start(async_req, tmpl->crypto_alg_type); if (ret) goto error_terminate; @@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req) static int qce_ahash_export(struct ahash_request *req, void *out) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned long flags = rctx->flags; - unsigned int digestsize = crypto_ahash_digestsize(ahash); - unsigned int blocksize = - 
crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); + struct qce_sha_saved_state *export_state = out; - if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { - struct sha1_state *out_state = out; - - out_state->count = rctx->count; - qce_cpu_to_be32p_array((__be32 *)out_state->state, - rctx->digest, digestsize); - memcpy(out_state->buffer, rctx->buf, blocksize); - } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { - struct sha256_state *out_state = out; - - out_state->count = rctx->count; - qce_cpu_to_be32p_array((__be32 *)out_state->state, - rctx->digest, digestsize); - memcpy(out_state->buf, rctx->buf, blocksize); - } else { - return -EINVAL; - } - - return 0; -} - -static int qce_import_common(struct ahash_request *req, u64 in_count, - const u32 *state, const u8 *buffer, bool hmac) -{ - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned int digestsize = crypto_ahash_digestsize(ahash); - unsigned int blocksize; - u64 count = in_count; - - blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); - rctx->count = in_count; - memcpy(rctx->buf, buffer, blocksize); - - if (in_count <= blocksize) { - rctx->first_blk = 1; - } else { - rctx->first_blk = 0; - /* - * For HMAC, there is a hardware padding done when first block - * is set. Therefore the byte_count must be incremened by 64 - * after the first block operation. - */ - if (hmac) - count += SHA_PADDING; - } - - rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK); - rctx->byte_count[1] = (__force __be32)(count >> 32); - qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state, - digestsize); - rctx->buflen = (unsigned int)(in_count & (blocksize - 1)); + memcpy(export_state->pending_buf, rctx->buf, rctx->buflen); + memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest)); + export_state->byte_count[0] = rctx->byte_count[0]; + export_state->byte_count[1] = rctx->byte_count[1]; + export_state->pending_buflen = rctx->buflen; + export_state->count = rctx->count; + export_state->first_blk = rctx->first_blk; + export_state->flags = rctx->flags; return 0; } static int qce_ahash_import(struct ahash_request *req, const void *in) { - struct qce_sha_reqctx *rctx; - unsigned long flags; - bool hmac; - int ret; + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + const struct qce_sha_saved_state *import_state = in; - ret = qce_ahash_init(req); - if (ret) - return ret; + memset(rctx, 0, sizeof(*rctx)); + rctx->count = import_state->count; + rctx->buflen = import_state->pending_buflen; + rctx->first_blk = import_state->first_blk; + rctx->flags = import_state->flags; + rctx->byte_count[0] = import_state->byte_count[0]; + rctx->byte_count[1] = import_state->byte_count[1]; + memcpy(rctx->buf, import_state->pending_buf, rctx->buflen); + memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest)); - rctx = ahash_request_ctx(req); - flags = rctx->flags; - hmac = IS_SHA_HMAC(flags); - - if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { - const struct sha1_state *state = in; - - ret = qce_import_common(req, state->count, state->state, - state->buffer, hmac); - } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { - const struct sha256_state *state = in; - - ret = qce_import_common(req, state->count, state->state, - state->buf, hmac); - } - - return ret; + return 0; } static int qce_ahash_update(struct ahash_request *req) @@ -270,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req) /* calculate how many bytes will be hashed 
 later */
 	hash_later = total % blocksize;
+
+	/*
+	 * At this point, there is more than one block size of data.  If
+	 * the available data to transfer is exactly a multiple of block
+	 * size, save the last block to be transferred in qce_ahash_final
+	 * (with the last block bit set) if this is indeed the end of the
+	 * data stream.  If not, this saved block will be transferred as
+	 * part of the next update.  If this block is not held back and
+	 * this is indeed the end of the data stream, the digest obtained
+	 * will be wrong since qce_ahash_final will see that rctx->buflen
+	 * is 0 and return without doing anything, which in turn means
+	 * that a digest will not be copied to the destination result
+	 * buffer.  qce_ahash_final cannot simply be changed to proceed
+	 * when rctx->buflen is 0, because the crypto engine BAM does not
+	 * allow zero-length transfers.
+	 */
+	if (!hash_later)
+		hash_later = blocksize;
+
 	if (hash_later) {
 		unsigned int src_offset = req->nbytes - hash_later;
 		scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
@@ -450,7 +415,7 @@ static const struct qce_ahash_def ahash_def[] = {
 		.drv_name	= "sha1-qce",
 		.digestsize	= SHA1_DIGEST_SIZE,
 		.blocksize	= SHA1_BLOCK_SIZE,
-		.statesize	= sizeof(struct sha1_state),
+		.statesize	= sizeof(struct qce_sha_saved_state),
 		.std_iv	= std_iv_sha1,
 	},
 	{
@@ -459,7 +424,7 @@ static const struct qce_ahash_def ahash_def[] = {
 		.drv_name	= "sha256-qce",
 		.digestsize	= SHA256_DIGEST_SIZE,
 		.blocksize	= SHA256_BLOCK_SIZE,
-		.statesize	= sizeof(struct sha256_state),
+		.statesize	= sizeof(struct qce_sha_saved_state),
 		.std_iv	= std_iv_sha256,
 	},
 	{
@@ -468,7 +433,7 @@ static const struct qce_ahash_def ahash_def[] = {
 		.drv_name	= "hmac-sha1-qce",
 		.digestsize	= SHA1_DIGEST_SIZE,
 		.blocksize	= SHA1_BLOCK_SIZE,
-		.statesize	= sizeof(struct sha1_state),
+		.statesize	= sizeof(struct qce_sha_saved_state),
 		.std_iv	= std_iv_sha1,
 	},
 	{
@@ -477,7 +442,7 @@ static const struct qce_ahash_def ahash_def[] = {
 		.drv_name	= "hmac-sha256-qce",
 		.digestsize	= SHA256_DIGEST_SIZE,
 		.blocksize	= SHA256_BLOCK_SIZE,
-		.statesize	= sizeof(struct sha256_state),
+		.statesize	= sizeof(struct qce_sha_saved_state),
 		.std_iv	= std_iv_sha256,
 	},
 };
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index a2d3da0ad95f..c0a0d8c4fce1 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -143,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
 
 	qce_dma_issue_pending(&qce->dma);
 
-	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+	ret = qce_start(async_req, tmpl->crypto_alg_type);
 	if (ret)
 		goto error_terminate;
 
@@ -167,16 +168,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
 	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
 	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
+	unsigned int __keylen;
 	int ret;
 
 	if (!key || !keylen)
 		return -EINVAL;
 
-	switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
+	/*
+	 * AES-XTS with key1 identical to key2 is not supported by the
+	 * crypto engine.  Revisit to request a fallback cipher in this case.
+	 */
+	if (IS_XTS(flags)) {
+		__keylen = keylen >> 1;
+		if (!memcmp(key, key + __keylen, __keylen))
+			return -ENOKEY;
+	} else {
+		__keylen = keylen;
+	}
+
+	switch (__keylen) {
 	case AES_KEYSIZE_128:
 	case AES_KEYSIZE_256:
 		memcpy(ctx->enc_key, key, keylen);
 		break;
+	case AES_KEYSIZE_192:
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
@@ -204,12 +222,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
 			   unsigned int keylen)
 {
 	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+	u32 _key[6];
 	int err;
 
 	err = verify_skcipher_des3_key(ablk, key);
 	if (err)
 		return err;
 
+	/*
+	 * The crypto engine does not support any two keys
+	 * being the same for triple des algorithms.
+	 * verify_skcipher_des3_key() does not check for all of
+	 * the conditions below, so return -ENOKEY in case any
+	 * two keys are the same.  Revisit to see if a fallback
+	 * cipher is needed to handle this condition.
+	 */
+	memcpy(_key, key, DES3_EDE_KEY_SIZE);
+	if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
+	    !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
+	    !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
+		return -ENOKEY;
+
 	ctx->enc_keylen = keylen;
 	memcpy(ctx->enc_key, key, keylen);
 	return 0;
@@ -221,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
 	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
 	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+	unsigned int blocksize = crypto_skcipher_blocksize(tfm);
 	int keylen;
 	int ret;
 
@@ -228,14 +262,31 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
 	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
 	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;
 
-	/* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and
-	 * is not a multiple of it; pass such requests to the fallback
+	/* The CE does not handle zero-length messages */
+	if (!req->cryptlen)
+		return 0;
+
+	/*
+	 * ECB and CBC algorithms require message lengths to be
+	 * multiples of the block size.
+	 */
+	if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
+		if (!IS_ALIGNED(req->cryptlen, blocksize))
+			return -EINVAL;
+
+	/*
+	 * Conditions for requesting a fallback cipher:
+	 * AES-192 (not supported by the crypto engine (CE))
+	 * AES-XTS request with len <= 512 bytes (not recommended to use CE)
+	 * AES-XTS request with len > QCE_SECTOR_SIZE and
+	 * is not a multiple of it.  (Revisit this condition to check if
+	 * it is needed in all versions of CE.)
 	 */
 	if (IS_AES(rctx->flags) &&
-	    (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
-	      req->cryptlen <= aes_sw_max_len) ||
-	     (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
-	      req->cryptlen % QCE_SECTOR_SIZE))) {
+	    ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
+	     (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
+				      (req->cryptlen > QCE_SECTOR_SIZE &&
+				       req->cryptlen % QCE_SECTOR_SIZE))))) {
 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 		skcipher_request_set_callback(&rctx->fallback_req,
 					      req->base.flags,
@@ -307,7 +358,7 @@ static const struct qce_skcipher_def skcipher_def[] = {
 		.name		= "ecb(aes)",
 		.drv_name	= "ecb-aes-qce",
 		.blocksize	= AES_BLOCK_SIZE,
-		.ivsize		= AES_BLOCK_SIZE,
+		.ivsize		= 0,
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
 	},
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 81befe7febaa..ed03058497bc 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -48,7 +48,7 @@ static void rk_ahash_reg_init(struct rk_crypto_info *dev)
 {
 	struct ahash_request *req = ahash_request_cast(dev->async_req);
 	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
-	int reg_status = 0;
+	int reg_status;
 
 	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
 		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 682c8a450a57..55aa3a71169b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include <linux/of_device.h>
 #include
 #include
@@ -401,7 +402,7 @@ static const struct samsung_aes_variant exynos_aes_data = {
 static const struct samsung_aes_variant exynos5433_slim_aes_data = {
 	.aes_offset	= 0x400,
 	.hash_offset	= 0x800,
-	.clk_names	= { "pclk", "aclk", },
+	.clk_names	= { "aclk", "pclk", },
 };
 
 static const struct of_device_id s5p_sss_dt_match[] = {
@@ -424,13 +425,9 @@ MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
 static inline const struct samsung_aes_variant *find_s5p_sss_version
 				   (const struct platform_device *pdev)
 {
-	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
-		const struct of_device_id *match;
+	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
+		return of_device_get_match_data(&pdev->dev);
 
-		match = of_match_node(s5p_sss_dt_match,
-					pdev->dev.of_node);
-		return (const struct samsung_aes_variant *)match->data;
-	}
 	return (const struct samsung_aes_variant *)
 			platform_get_device_id(pdev)->driver_data;
 }
@@ -2159,7 +2156,7 @@ static struct skcipher_alg algs[] = {
 static int s5p_aes_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	int i, j, err = -ENODEV;
+	int i, j, err;
 	const struct samsung_aes_variant *variant;
 	struct s5p_aes_dev *pdata;
 	struct resource *res;
@@ -2189,14 +2186,14 @@ static int s5p_aes_probe(struct platform_device *pdev)
 	}
 
 	pdata->res = res;
-	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+	pdata->ioaddr = devm_ioremap_resource(dev, res);
 	if (IS_ERR(pdata->ioaddr)) {
 		if (!pdata->use_hash)
 			return
PTR_ERR(pdata->ioaddr); /* try AES without HASH */ res->end -= 0x300; pdata->use_hash = false; - pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res); + pdata->ioaddr = devm_ioremap_resource(dev, res); if (IS_ERR(pdata->ioaddr)) return PTR_ERR(pdata->ioaddr); } diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index f300b0a5958a..1c6929fb3a13 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -69,8 +69,24 @@ /* Max Authentication tag size */ #define SA_MAX_AUTH_TAG_SZ 64 -#define PRIV_ID 0x1 -#define PRIV 0x1 +enum sa_algo_id { + SA_ALG_CBC_AES = 0, + SA_ALG_EBC_AES, + SA_ALG_CBC_DES3, + SA_ALG_ECB_DES3, + SA_ALG_SHA1, + SA_ALG_SHA256, + SA_ALG_SHA512, + SA_ALG_AUTHENC_SHA1_AES, + SA_ALG_AUTHENC_SHA256_AES, +}; + +struct sa_match_data { + u8 priv; + u8 priv_id; + u32 supported_algos; + bool skip_engine_control; +}; static struct device *sa_k3_dev; @@ -696,8 +712,9 @@ static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr) } static -int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, - u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz, +int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data, + const u8 *enc_key, u16 enc_key_sz, + const u8 *auth_key, u16 auth_key_sz, struct algo_data *ad, u8 enc, u32 *swinfo) { int enc_sc_offset = 0; @@ -732,8 +749,8 @@ int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key, sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0; memcpy(&sc_buf[2], &sc_id, 2); sc_buf[4] = 0x0; - sc_buf[5] = PRIV_ID; - sc_buf[6] = PRIV; + sc_buf[5] = match_data->priv_id; + sc_buf[6] = match_data->priv; sc_buf[7] = 0x0; /* Prepare context for encryption engine */ @@ -892,8 +909,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, return ret; /* Setup Encryption Security Context & Command label template */ - if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1, - &ctx->enc.epib[1])) + if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0, + ad, 1, &ctx->enc.epib[1])) goto badkey; cmdl_len = sa_format_cmdl_gen(&cfg, @@ -905,8 +922,8 @@ static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key, ctx->enc.cmdl_size = cmdl_len; /* Setup Decryption Security Context & Command label template */ - if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0, - &ctx->dec.epib[1])) + if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0, + ad, 0, &ctx->dec.epib[1])) goto badkey; cfg.enc_eng_id = ad->enc_eng.eng_id; @@ -1106,7 +1123,7 @@ static int sa_run(struct sa_req *req) else dma_rx = pdata->dma_rx1; - ddev = dma_rx->device->dev; + ddev = dmaengine_get_dma_device(pdata->dma_tx); rxd->ddev = ddev; memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size); @@ -1146,8 +1163,10 @@ static int sa_run(struct sa_req *req) mapped_sg->sgt.sgl = src; mapped_sg->sgt.orig_nents = src_nents; ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0); - if (ret) + if (ret) { + kfree(rxd); return ret; + } mapped_sg->dir = dir_src; mapped_sg->mapped = true; @@ -1155,8 +1174,10 @@ static int sa_run(struct sa_req *req) mapped_sg->sgt.sgl = req->src; mapped_sg->sgt.orig_nents = sg_nents; ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0); - if (ret) + if (ret) { + kfree(rxd); return ret; + } mapped_sg->dir = dir_src; mapped_sg->mapped = true; @@ -1446,9 +1467,10 @@ static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad) cfg.akey = NULL; cfg.akey_len = 0; + ctx->dev_data = dev_get_drvdata(sa_k3_dev); /* Setup Encryption Security Context & Command label template */ - if 
(sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0, - &ctx->enc.epib[1])) + if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0, + ad, 0, &ctx->enc.epib[1])) goto badkey; cmdl_len = sa_format_cmdl_gen(&cfg, @@ -1716,6 +1738,7 @@ static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash, int ret; memzero_explicit(ctx, sizeof(*ctx)); + ctx->dev_data = data; ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->shash)) { @@ -1817,8 +1840,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc, cfg.akey_len = keys.authkeylen; /* Setup Encryption Security Context & Command label template */ - if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen, - keys.authkey, keys.authkeylen, + if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey, + keys.enckeylen, keys.authkey, keys.authkeylen, ad, 1, &ctx->enc.epib[1])) return -EINVAL; @@ -1831,8 +1854,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc, ctx->enc.cmdl_size = cmdl_len; /* Setup Decryption Security Context & Command label template */ - if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen, - keys.authkey, keys.authkeylen, + if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey, + keys.enckeylen, keys.authkey, keys.authkeylen, ad, 0, &ctx->dec.epib[1])) return -EINVAL; @@ -1950,7 +1973,7 @@ static int sa_aead_decrypt(struct aead_request *req) } static struct sa_alg_tmpl sa_algs[] = { - { + [SA_ALG_CBC_AES] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(aes)", @@ -1973,7 +1996,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, - { + [SA_ALG_EBC_AES] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(aes)", @@ -1995,7 +2018,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, - { + [SA_ALG_CBC_DES3] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(des3_ede)", @@ -2018,7 +2041,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, - { + [SA_ALG_ECB_DES3] = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(des3_ede)", @@ -2040,7 +2063,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_decrypt, } }, - { + [SA_ALG_SHA1] = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.ahash = { .halg.base = { @@ -2069,7 +2092,7 @@ static struct sa_alg_tmpl sa_algs[] = { .import = sa_sha_import, }, }, - { + [SA_ALG_SHA256] = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.ahash = { .halg.base = { @@ -2098,7 +2121,7 @@ static struct sa_alg_tmpl sa_algs[] = { .import = sa_sha_import, }, }, - { + [SA_ALG_SHA512] = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.ahash = { .halg.base = { @@ -2127,7 +2150,7 @@ static struct sa_alg_tmpl sa_algs[] = { .import = sa_sha_import, }, }, - { + [SA_ALG_AUTHENC_SHA1_AES] = { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { @@ -2154,7 +2177,7 @@ static struct sa_alg_tmpl sa_algs[] = { .decrypt = sa_aead_decrypt, }, }, - { + [SA_ALG_AUTHENC_SHA256_AES] = { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { @@ -2185,13 +2208,19 @@ static struct sa_alg_tmpl sa_algs[] = { }; /* Register the algorithms in crypto framework */ -static void sa_register_algos(const struct device *dev) +static void sa_register_algos(struct sa_crypto_data *dev_data) { + const struct sa_match_data *match_data = dev_data->match_data; + struct device *dev = dev_data->dev; char *alg_name; u32 type; int i, err; for (i = 0; i < ARRAY_SIZE(sa_algs); i++) { + /* Skip unsupported algos */ + if (!(match_data->supported_algos 
& BIT(i)))
+			continue;
+
 		type = sa_algs[i].type;
 		if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
 			alg_name = sa_algs[i].alg.skcipher.base.cra_name;
@@ -2329,14 +2358,39 @@ static int sa_link_child(struct device *dev, void *data)
 	return 0;
 }
 
+static struct sa_match_data am654_match_data = {
+	.priv = 1,
+	.priv_id = 1,
+	.supported_algos = GENMASK(SA_ALG_AUTHENC_SHA256_AES, 0),
+};
+
+static struct sa_match_data am64_match_data = {
+	.priv = 0,
+	.priv_id = 0,
+	.supported_algos = BIT(SA_ALG_CBC_AES) |
+			   BIT(SA_ALG_EBC_AES) |
+			   BIT(SA_ALG_SHA256) |
+			   BIT(SA_ALG_SHA512) |
+			   BIT(SA_ALG_AUTHENC_SHA256_AES),
+	.skip_engine_control = true,
+};
+
+static const struct of_device_id of_match[] = {
+	{ .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
+	{ .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
+	{ .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
 static int sa_ul_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *match;
 	struct device *dev = &pdev->dev;
 	struct device_node *node = dev->of_node;
 	struct resource *res;
 	static void __iomem *saul_base;
 	struct sa_crypto_data *dev_data;
-	u32 val;
 	int ret;
 
 	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
@@ -2350,7 +2404,7 @@ static int sa_ul_probe(struct platform_device *pdev)
 	dev_set_drvdata(sa_k3_dev, dev_data);
 
 	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
+	ret = pm_runtime_resume_and_get(dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
 			ret);
@@ -2362,18 +2416,29 @@ static int sa_ul_probe(struct platform_device *pdev)
 	if (ret)
 		goto disable_pm_runtime;
 
+	match = of_match_node(of_match, dev->of_node);
+	if (!match) {
+		dev_err(dev, "No compatible match found\n");
+		ret = -ENODEV;
+		goto disable_pm_runtime;
+	}
+	dev_data->match_data = match->data;
+
 	spin_lock_init(&dev_data->scid_lock);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	saul_base = devm_ioremap_resource(dev, res);
 
 	dev_data->base = saul_base;
-	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
-	    SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
-	    SA_EEC_TRNG_EN;
 
-	writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+	if (!dev_data->match_data->skip_engine_control) {
+		u32 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
+			  SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
+			  SA_EEC_TRNG_EN;
 
-	sa_register_algos(dev);
+		writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
+	}
+
+	sa_register_algos(dev_data);
 
 	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
 	if (ret)
@@ -2419,13 +2483,6 @@ static int sa_ul_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const struct of_device_id of_match[] = {
-	{.compatible = "ti,j721e-sa2ul",},
-	{.compatible = "ti,am654-sa2ul",},
-	{},
-};
-MODULE_DEVICE_TABLE(of, of_match);
-
 static struct platform_driver sa_ul_driver = {
 	.probe = sa_ul_probe,
 	.remove = sa_ul_remove,
diff --git a/drivers/crypto/sa2ul.h b/drivers/crypto/sa2ul.h
index f597ddecde34..ed66d1f111db 100644
--- a/drivers/crypto/sa2ul.h
+++ b/drivers/crypto/sa2ul.h
@@ -171,9 +171,12 @@ struct sa_tfm_ctx;
 #define SA_UNSAFE_DATA_SZ_MIN		240
 #define SA_UNSAFE_DATA_SZ_MAX		256
 
+struct sa_match_data;
+
 /**
  * struct sa_crypto_data - Crypto driver instance data
  * @base: Base address of the register space
+ * @match_data: Pointer to SoC specific match data
  * @pdev: Platform device pointer
 * @sc_pool: security context pool
 * @dev: Device pointer
@@ -189,6 +192,7 @@ struct sa_tfm_ctx;
 */
 struct sa_crypto_data {
	void __iomem
*base; + const struct sa_match_data *match_data; struct platform_device *pdev; struct dma_pool *sc_pool; struct device *dev; diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 2a4793176c71..7389a0536ff0 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -542,7 +542,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp) int ret; u32 cfg, hw_mode; - pm_runtime_get_sync(cryp->dev); + pm_runtime_resume_and_get(cryp->dev); /* Disable interrupt */ stm32_cryp_write(cryp, CRYP_IMSCR, 0); @@ -2043,7 +2043,7 @@ static int stm32_cryp_remove(struct platform_device *pdev) if (!cryp) return -ENODEV; - ret = pm_runtime_get_sync(cryp->dev); + ret = pm_runtime_resume_and_get(cryp->dev); if (ret < 0) return ret; diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 7ac0573ef663..389de9e3302d 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -813,7 +813,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err) static int stm32_hash_hw_init(struct stm32_hash_dev *hdev, struct stm32_hash_request_ctx *rctx) { - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); if (!(HASH_FLAGS_INIT & hdev->flags)) { stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT); @@ -962,7 +962,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out) u32 *preg; unsigned int i; - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)) cpu_relax(); @@ -1000,7 +1000,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in) preg = rctx->hw_context; - pm_runtime_get_sync(hdev->dev); + pm_runtime_resume_and_get(hdev->dev); stm32_hash_write(hdev, HASH_IMR, *preg++); stm32_hash_write(hdev, HASH_STR, *preg++); @@ -1566,7 +1566,7 @@ static int stm32_hash_remove(struct platform_device *pdev) if (!hdev) return -ENODEV; - ret = pm_runtime_get_sync(hdev->dev); + ret = pm_runtime_resume_and_get(hdev->dev); if (ret < 0) return ret; diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c index 9866c2a5e9a7..759d0d9786fd 100644 --- a/drivers/crypto/ux500/cryp/cryp.c +++ b/drivers/crypto/ux500/cryp/cryp.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen for ST-Ericsson. * Author: Jonas Linde for ST-Ericsson. @@ -15,7 +15,7 @@ #include "cryp_p.h" #include "cryp.h" -/** +/* * cryp_wait_until_done - wait until the device logic is not busy */ void cryp_wait_until_done(struct cryp_device_data *device_data) @@ -285,6 +285,7 @@ int cryp_configure_init_vector(struct cryp_device_data *device_data, * other device context parameter * @device_data: Pointer to the device data struct for base address. * @ctx: Crypto device context + * @cryp_mode: Mode: Polling, Interrupt or DMA */ void cryp_save_device_context(struct cryp_device_data *device_data, struct cryp_device_context *ctx, diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h index 8da7f87b339b..db5713d7c940 100644 --- a/drivers/crypto/ux500/cryp/cryp.h +++ b/drivers/crypto/ux500/cryp/cryp.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen for ST-Ericsson. * Author: Jonas Linde for ST-Ericsson. 
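
The stm32 and sa2ul hunks above all replace pm_runtime_get_sync() with
pm_runtime_resume_and_get().  The motivation is the same in each case:
pm_runtime_get_sync() raises the device's usage counter even when the resume
fails, so every error path needs a matching pm_runtime_put_noidle(), which is
easy to forget.  A minimal sketch of the two patterns, for illustration only
(example_resume() is a hypothetical function, not part of this series):

#include <linux/pm_runtime.h>

/* Hypothetical illustration; not part of this series. */
static int example_resume(struct device *dev)
{
	int ret;

	/* Old pattern: the usage counter stays elevated on failure. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* easy to forget */
		return ret;
	}
	pm_runtime_put(dev);

	/* New pattern: the helper drops the reference on failure itself. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;
	pm_runtime_put(dev);

	return 0;
}

With the helper, a plain return on the error path no longer leaks a
usage-count reference.
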
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index c3adeb2e5823..30cdd5253929 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen for ST-Ericsson. * Author: Joakim Bech for ST-Ericsson. @@ -62,7 +62,7 @@ struct cryp_driver_data { /** * struct cryp_ctx - Crypto context * @config: Crypto mode. - * @key[CRYP_MAX_KEY_SIZE]: Key. + * @key: Key array. * @keylen: Length of key. * @iv: Pointer to initialization vector. * @indata: Pointer to indata. @@ -73,6 +73,7 @@ struct cryp_driver_data { * @updated: Updated flag. * @dev_ctx: Device dependent context. * @device: Pointer to the device. + * @session_id: Atomic session ID. */ struct cryp_ctx { struct cryp_config config; @@ -608,12 +609,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx) chan = ctx->device->dma.chan_mem2cryp; dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, - ctx->device->dma.sg_src_len, DMA_TO_DEVICE); + ctx->device->dma.nents_src, DMA_TO_DEVICE); chan = ctx->device->dma.chan_cryp2mem; dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, - ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); + ctx->device->dma.nents_dst, DMA_FROM_DEVICE); } static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg, @@ -1290,7 +1291,6 @@ static int ux500_cryp_probe(struct platform_device *pdev) device_data->phybase = res->start; device_data->base = devm_ioremap_resource(dev, res); if (IS_ERR(device_data->base)) { - dev_err(dev, "[%s]: ioremap failed!", __func__); ret = PTR_ERR(device_data->base); goto out; } diff --git a/drivers/crypto/ux500/cryp/cryp_irq.c b/drivers/crypto/ux500/cryp/cryp_irq.c index 7ebde69e8c76..6d2f07bec98a 100644 --- a/drivers/crypto/ux500/cryp/cryp_irq.c +++ b/drivers/crypto/ux500/cryp/cryp_irq.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen for ST-Ericsson. * Author: Jonas Linde for ST-Ericsson. diff --git a/drivers/crypto/ux500/cryp/cryp_irq.h b/drivers/crypto/ux500/cryp/cryp_irq.h index 1984f30100ff..da90029ea141 100644 --- a/drivers/crypto/ux500/cryp/cryp_irq.h +++ b/drivers/crypto/ux500/cryp/cryp_irq.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen for ST-Ericsson. * Author: Jonas Linde for ST-Ericsson. @@ -19,7 +19,7 @@ enum cryp_irq_src_id { CRYP_IRQ_SRC_ALL = 0x3 }; -/** +/* * M0 Funtions */ void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src); diff --git a/drivers/crypto/ux500/cryp/cryp_irqp.h b/drivers/crypto/ux500/cryp/cryp_irqp.h index 879ed68a12d7..4981a3f461e5 100644 --- a/drivers/crypto/ux500/cryp/cryp_irqp.h +++ b/drivers/crypto/ux500/cryp/cryp_irqp.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen for ST-Ericsson. * Author: Jonas Linde for ST-Ericsson. 
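
The cryp_dma_done() change above switches the count passed to dma_unmap_sg()
from the value dma_map_sg() returned to the driver's saved nents.  That is
the DMA-API rule being enforced: dma_unmap_sg() must be called with the same
nents that was originally passed to dma_map_sg(), because the mapping may
coalesce scatterlist entries and return a smaller count.  A minimal sketch of
the rule, assuming dev, sg and nents come from the caller
(example_dma_cycle() is hypothetical, not part of this series):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int example_dma_cycle(struct device *dev, struct scatterlist *sg,
			     int nents)
{
	int mapped;

	mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -EIO;

	/* ... program the DMA engine with 'mapped' entries ... */

	/* Unmap with the original nents, never with 'mapped'. */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}
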
@@ -13,7 +13,7 @@ #include "cryp_irq.h" -/** +/* * * CRYP Registers - Offset mapping * +-----------------+ diff --git a/drivers/crypto/ux500/cryp/cryp_p.h b/drivers/crypto/ux500/cryp/cryp_p.h index 0df84eaa8531..60b47fe4de35 100644 --- a/drivers/crypto/ux500/cryp/cryp_p.h +++ b/drivers/crypto/ux500/cryp/cryp_p.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/** +/* * Copyright (C) ST-Ericsson SA 2010 * Author: Shujuan Chen for ST-Ericsson. * Author: Jonas Linde for ST-Ericsson. @@ -17,7 +17,7 @@ #include "cryp.h" #include "cryp_irqp.h" -/** +/* * Generic Macros */ #define CRYP_SET_BITS(reg_name, mask) \ @@ -34,7 +34,7 @@ writel_relaxed(((readl_relaxed(reg) & ~(mask)) | \ (((u32)val << shift) & (mask))), reg) -/** +/* * CRYP specific Macros */ #define CRYP_PERIPHERAL_ID0 0xE3 @@ -48,7 +48,7 @@ #define CRYP_PCELL_ID2 0x05 #define CRYP_PCELL_ID3 0xB1 -/** +/* * CRYP register default values */ #define MAX_DEVICE_SUPPORT 2 @@ -62,7 +62,7 @@ #define CRYP_KEY_DEFAULT 0x0 #define CRYP_INIT_VECT_DEFAULT 0x0 -/** +/* * CRYP Control register specific mask */ #define CRYP_CR_SECURE_MASK BIT(0) @@ -81,7 +81,6 @@ CRYP_CR_PRLG_MASK |\ CRYP_CR_ALGODIR_MASK |\ CRYP_CR_ALGOMODE_MASK |\ - CRYP_CR_DATATYPE_MASK |\ CRYP_CR_KEYSIZE_MASK |\ CRYP_CR_KEYRDEN_MASK |\ CRYP_CR_DATATYPE_MASK) @@ -91,7 +90,7 @@ #define CRYP_SR_IFEM_MASK BIT(0) #define CRYP_SR_BUSY_MASK BIT(4) -/** +/* * Bit position used while setting bits in register */ #define CRYP_CR_PRLG_POS 1 @@ -107,7 +106,7 @@ #define CRYP_SR_BUSY_POS 4 -/** +/* * CRYP PCRs------PC_NAND control register * BIT_MASK */ diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index da284b0ea1b2..ecb7412e84e3 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -190,7 +190,7 @@ static void hash_dma_done(struct hash_ctx *ctx) chan = ctx->device->dma.chan_mem2hash; dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, - ctx->device->dma.sg_len, DMA_TO_DEVICE); + ctx->device->dma.nents, DMA_TO_DEVICE); } static int hash_dma_write(struct hash_ctx *ctx, @@ -356,7 +356,7 @@ static int hash_enable_power(struct hash_device_data *device_data, /** * hash_get_device_data - Checks for an available hash device and return it. - * @hash_ctx: Structure for the hash context. + * @ctx: Structure for the hash context. * @device_data: Structure for the hash device. * * This function check for an available hash device and return it to @@ -542,7 +542,7 @@ static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) } /** - * hash_init - Common hash init function for SHA1/SHA2 (SHA256). + * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256). * @req: The hash request for the job. * * Initialize structures. @@ -585,6 +585,7 @@ static int ux500_hash_init(struct ahash_request *req) * @device_data: Structure for the hash device. * @message: Block (512 bits) of message to be written to * the HASH hardware. + * @length: Message length * */ static void hash_processblock(struct hash_device_data *device_data, @@ -1295,7 +1296,7 @@ void hash_get_digest(struct hash_device_data *device_data, } /** - * hash_update - The hash update function for SHA1/SHA2 (SHA256). + * ahash_update - The hash update function for SHA1/SHA2 (SHA256). * @req: The hash request for the job. 
*/ static int ahash_update(struct ahash_request *req) @@ -1315,7 +1316,7 @@ static int ahash_update(struct ahash_request *req) } /** - * hash_final - The hash final function for SHA1/SHA2 (SHA256). + * ahash_final - The hash final function for SHA1/SHA2 (SHA256). * @req: The hash request for the job. */ static int ahash_final(struct ahash_request *req) @@ -1615,9 +1616,6 @@ static struct hash_algo_template hash_algs[] = { } }; -/** - * hash_algs_register_all - - */ static int ahash_algs_register_all(struct hash_device_data *device_data) { int ret; @@ -1640,9 +1638,6 @@ static int ahash_algs_register_all(struct hash_device_data *device_data) return ret; } -/** - * hash_algs_unregister_all - - */ static void ahash_algs_unregister_all(struct hash_device_data *device_data) { int i; @@ -1681,7 +1676,6 @@ static int ux500_hash_probe(struct platform_device *pdev) device_data->phybase = res->start; device_data->base = devm_ioremap_resource(dev, res); if (IS_ERR(device_data->base)) { - dev_err(dev, "%s: ioremap() failed!\n", __func__); ret = PTR_ERR(device_data->base); goto out; } diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c index d05c02baebcf..ec06189fbf99 100644 --- a/drivers/crypto/vmx/aes.c +++ b/drivers/crypto/vmx/aes.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c index d88084447f1c..ed0debc7acb5 100644 --- a/drivers/crypto/vmx/aes_cbc.c +++ b/drivers/crypto/vmx/aes_cbc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CBC routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c index 79ba062ee1c1..9a3da8cd62f3 100644 --- a/drivers/crypto/vmx/aes_ctr.c +++ b/drivers/crypto/vmx/aes_ctr.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES CTR routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c index 9fee1b1532a4..dabbccb41550 100644 --- a/drivers/crypto/vmx/aes_xts.c +++ b/drivers/crypto/vmx/aes_xts.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * AES XTS routines supporting VMX In-core instructions on Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c index 14807ac2e3b9..5bc5710a6de0 100644 --- a/drivers/crypto/vmx/ghash.c +++ b/drivers/crypto/vmx/ghash.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * GHASH routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015, 2019 International Business Machines Inc. diff --git a/drivers/crypto/vmx/vmx.c b/drivers/crypto/vmx/vmx.c index a40d08e75fc0..7eb713cc87c8 100644 --- a/drivers/crypto/vmx/vmx.c +++ b/drivers/crypto/vmx/vmx.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * Routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index a5f5c30368a2..2d0c8922f635 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -14,16 +14,30 @@ config FS_ENCRYPTION F2FS and UBIFS make use of this feature. 
# Filesystems supporting encryption must select this if FS_ENCRYPTION.  This
-# allows the algorithms to be built as modules when all the filesystems are.
+# allows the algorithms to be built as modules when all the filesystems are,
+# whereas selecting them from FS_ENCRYPTION would force them to be built-in.
+#
+# Note: this option only pulls in the algorithms that filesystem encryption
+# needs "by default".  If userspace will use "non-default" encryption modes such
+# as Adiantum encryption, then those other modes need to be explicitly enabled
+# in the crypto API; see Documentation/filesystems/fscrypt.rst for details.
+#
+# Also note that this option only pulls in the generic implementations of the
+# algorithms, not any per-architecture optimized implementations.  It is
+# strongly recommended to enable optimized implementations too.  It is safe to
+# disable these generic implementations if corresponding optimized
+# implementations will always be available; for this reason, these are
+# soft dependencies ('imply' rather than 'select').  Only disable the
+# generic implementations if you are sure they will never be needed.
 config FS_ENCRYPTION_ALGS
 	tristate
-	select CRYPTO_AES
-	select CRYPTO_CBC
-	select CRYPTO_CTS
-	select CRYPTO_ECB
-	select CRYPTO_HMAC
-	select CRYPTO_SHA512
-	select CRYPTO_XTS
+	imply CRYPTO_AES
+	imply CRYPTO_CBC
+	imply CRYPTO_CTS
+	imply CRYPTO_ECB
+	imply CRYPTO_HMAC
+	imply CRYPTO_SHA512
+	imply CRYPTO_XTS
 
 config FS_ENCRYPTION_INLINE_CRYPT
 	bool "Enable fscrypt to use inline crypto"
diff --git a/fs/verity/Kconfig b/fs/verity/Kconfig
index 88fb25119899..24d1b54de807 100644
--- a/fs/verity/Kconfig
+++ b/fs/verity/Kconfig
@@ -3,9 +3,13 @@
 config FS_VERITY
 	bool "FS Verity (read-only file-based authenticity protection)"
 	select CRYPTO
-	# SHA-256 is selected as it's intended to be the default hash algorithm.
+	# SHA-256 is implied as it's intended to be the default hash algorithm.
 	# To avoid bloat, other wanted algorithms must be selected explicitly.
-	select CRYPTO_SHA256
+	# Note that CRYPTO_SHA256 denotes the generic C implementation, but
+	# some architectures provide optimized implementations of the same
+	# algorithm that may be used instead.  In that case, CRYPTO_SHA256 may
+	# be omitted even if SHA-256 is being used.
+	imply CRYPTO_SHA256
 	help
 	  This option enables fs-verity.  fs-verity is the dm-verity
 	  mechanism implemented at the file level.  On supported
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index fcde59c65a81..cb3d6b1c655d 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm:	ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
 */
 static inline void crypto_free_acomp(struct crypto_acomp *tfm)
 {
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index fcc12c593ef8..e728469c4ccc 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
 /**
 * crypto_free_aead() - zeroize and free aead handle
 * @tfm: cipher handle to be freed
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
 */
 static inline void crypto_free_aead(struct crypto_aead *tfm)
 {
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 1d3aa252caba..5764b46bd1ec 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm(
 * crypto_free_akcipher() - free AKCIPHER tfm handle
 *
 * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher()
+ *
+ * If @tfm is a NULL or error pointer, this function does nothing.
 */
 static inline void crypto_free_akcipher(struct crypto_akcipher *tfm)
 {
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 3a1c72fdb7cf..dabaee698718 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -47,13 +47,18 @@ static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
 	hchacha_block_generic(state, out, nrounds);
 }
 
-void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
-static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+static inline void chacha_init_consts(u32 *state)
 {
 	state[0]  = 0x61707865; /* "expa" */
 	state[1]  = 0x3320646e; /* "nd 3" */
 	state[2]  = 0x79622d32; /* "2-by" */
 	state[3]  = 0x6b206574; /* "te k" */
+}
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
+static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+{
+	chacha_init_consts(state);
 	state[4]  = key[0];
 	state[5]  = key[1];
 	state[6]  = key[2];
diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h
new file mode 100644
index 000000000000..70964781eb68
--- /dev/null
+++ b/include/crypto/ecc_curve.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 HiSilicon */
+
+#ifndef _CRYPTO_ECC_CURVE_H
+#define _CRYPTO_ECC_CURVE_H
+
+#include <linux/types.h>
+
+/**
+ * struct ecc_point - elliptic curve point in affine coordinates
+ *
+ * @x:		X coordinate in vli form.
+ * @y:		Y coordinate in vli form.
+ * @ndigits:	Length of vlis in u64 qwords.
+ */
+struct ecc_point {
+	u64 *x;
+	u64 *y;
+	u8 ndigits;
+};
+
+/**
+ * struct ecc_curve - definition of elliptic curve
+ *
+ * @name:	Short name of the curve.
+ * @g:		Generator point of the curve.
+ * @p:		Prime number; if Barrett's reduction is used for this curve,
+ *		the pre-calculated value 'mu' is appended to @p after ndigits.
+ *		Use of Barrett's reduction is heuristically determined in
+ *		vli_mmod_fast().
+ * @n:		Order of the curve group.
+ * @a:		Curve parameter a.
+ * @b:		Curve parameter b.
+ */
+struct ecc_curve {
+	char *name;
+	struct ecc_point g;
+	u64 *p;
+	u64 *n;
+	u64 *a;
+	u64 *b;
+};
+
+/**
+ * ecc_get_curve() - get an elliptic curve definition
+ * @curve_id: Curve ID, as defined in 'include/crypto/ecdh.h'
+ *
+ * Returns the curve on success, NULL otherwise.
+ */
+const struct ecc_curve *ecc_get_curve(unsigned int curve_id);
+
+/**
+ * ecc_get_curve25519() - get the curve25519 definition
+ *
+ * Returns the curve25519 definition.
+ */
+const struct ecc_curve *ecc_get_curve25519(void);
+
+#endif
diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h
index a5b805b5526d..a9f98078d29c 100644
--- a/include/crypto/ecdh.h
+++ b/include/crypto/ecdh.h
@@ -25,16 +25,15 @@
 /* Curves IDs */
 #define ECC_CURVE_NIST_P192	0x0001
 #define ECC_CURVE_NIST_P256	0x0002
+#define ECC_CURVE_NIST_P384	0x0003
 
 /**
 * struct ecdh - define an ECDH private key
 *
- * @curve_id:	ECC curve the key is based on.
* @key: Private ECDH key * @key_size: Size of the private ECDH key */ struct ecdh { - unsigned short curve_id; char *key; unsigned short key_size; }; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 13f8a6a54ca8..b2bc1e46e86a 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) /** * crypto_free_ahash() - zeroize and free the ahash handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_ahash(struct crypto_ahash *tfm) { @@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) /** * crypto_free_shash() - zeroize and free the message digest handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_shash(struct crypto_shash *tfm) { diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h index 064e52ca5248..196aa769f296 100644 --- a/include/crypto/internal/poly1305.h +++ b/include/crypto/internal/poly1305.h @@ -18,7 +18,8 @@ * only the ε-almost-∆-universal hash function (not the full MAC) is computed. */ -void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key); +void poly1305_core_setkey(struct poly1305_core_key *key, + const u8 raw_key[POLY1305_BLOCK_SIZE]); static inline void poly1305_core_init(struct poly1305_state *state) { *state = (struct poly1305_state){}; diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index 88b591215d5c..cccceadc164b 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) * crypto_free_kpp() - free KPP tfm handle * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_kpp(struct crypto_kpp *tfm) { diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h index f1f67fc749cf..090692ec3bc7 100644 --- a/include/crypto/poly1305.h +++ b/include/crypto/poly1305.h @@ -58,8 +58,10 @@ struct poly1305_desc_ctx { }; }; -void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key); -void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key); +void poly1305_init_arch(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); +void poly1305_init_generic(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]); static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 8b4b844b4eef..17bb3673d3c1 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) /** * crypto_free_rng() - zeroize and free RNG handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. 
*/ static inline void crypto_free_rng(struct crypto_rng *tfm) { diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 6a733b171a5d..ef0fc9ed4342 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm( /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) { diff --git a/include/keys/asymmetric-type.h b/include/keys/asymmetric-type.h index a29d3ff2e7e8..c432fdb8547f 100644 --- a/include/keys/asymmetric-type.h +++ b/include/keys/asymmetric-type.h @@ -72,6 +72,12 @@ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key) return key->payload.data[asym_key_ids]; } +static inline +const struct public_key *asymmetric_key_public_key(const struct key *key) +{ + return key->payload.data[asym_crypto]; +} + extern struct key *find_asymmetric_key(struct key *keyring, const struct asymmetric_key_id *id_0, const struct asymmetric_key_id *id_1, diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index d06988d1565e..461b7aa587ba 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -19,8 +19,14 @@ enum OID { OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */ OID_id_dsa, /* 1.2.840.10040.4.1 */ - OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */ + OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */ + OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */ + OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */ + OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */ + OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */ + OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */ + OID_id_ecdsa_with_sha512, /* 1.2.840.10045.4.3.4 */ /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */ @@ -58,6 +64,7 @@ enum OID { OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ + OID_id_ansip384r1, /* 1.3.132.0.34 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ @@ -122,6 +129,7 @@ enum OID { }; extern enum OID look_up_OID(const void *data, size_t datasize); +extern int parse_OID(const void *data, size_t datasize, enum OID *oid); extern int sprint_oid(const void *, size_t, char *, size_t); extern int sprint_OID(enum OID, char *, size_t); diff --git a/include/trace/events/random.h b/include/trace/events/random.h index 9570a10cb949..3d7b432ca5f3 100644 --- a/include/trace/events/random.h +++ b/include/trace/events/random.h @@ -85,28 +85,6 @@ TRACE_EVENT(credit_entropy_bits, __entry->entropy_count, (void *)__entry->IP) ); -TRACE_EVENT(push_to_pool, - TP_PROTO(const char *pool_name, int pool_bits, int input_bits), - - TP_ARGS(pool_name, pool_bits, input_bits), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, pool_bits ) - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->pool_bits = pool_bits; - __entry->input_bits = input_bits; - ), - - TP_printk("%s: pool_bits %d input_pool_bits %d", - __entry->pool_name, __entry->pool_bits, - __entry->input_bits) -); - TRACE_EVENT(debit_entropy, TP_PROTO(const char *pool_name, int debit_bits), @@ -161,35 +139,6 @@ TRACE_EVENT(add_disk_randomness, MINOR(__entry->dev), __entry->input_bits) ); 
-TRACE_EVENT(xfer_secondary_pool, - TP_PROTO(const char *pool_name, int xfer_bits, int request_bits, - int pool_entropy, int input_entropy), - - TP_ARGS(pool_name, xfer_bits, request_bits, pool_entropy, - input_entropy), - - TP_STRUCT__entry( - __field( const char *, pool_name ) - __field( int, xfer_bits ) - __field( int, request_bits ) - __field( int, pool_entropy ) - __field( int, input_entropy ) - ), - - TP_fast_assign( - __entry->pool_name = pool_name; - __entry->xfer_bits = xfer_bits; - __entry->request_bits = request_bits; - __entry->pool_entropy = pool_entropy; - __entry->input_entropy = input_entropy; - ), - - TP_printk("pool %s xfer_bits %d request_bits %d pool_entropy %d " - "input_entropy %d", __entry->pool_name, __entry->xfer_bits, - __entry->request_bits, __entry->pool_entropy, - __entry->input_entropy) -); - DECLARE_EVENT_CLASS(random__get_random_bytes, TP_PROTO(int nbytes, unsigned long IP), @@ -253,38 +202,6 @@ DEFINE_EVENT(random__extract_entropy, extract_entropy, TP_ARGS(pool_name, nbytes, entropy_count, IP) ); -DEFINE_EVENT(random__extract_entropy, extract_entropy_user, - TP_PROTO(const char *pool_name, int nbytes, int entropy_count, - unsigned long IP), - - TP_ARGS(pool_name, nbytes, entropy_count, IP) -); - -TRACE_EVENT(random_read, - TP_PROTO(int got_bits, int need_bits, int pool_left, int input_left), - - TP_ARGS(got_bits, need_bits, pool_left, input_left), - - TP_STRUCT__entry( - __field( int, got_bits ) - __field( int, need_bits ) - __field( int, pool_left ) - __field( int, input_left ) - ), - - TP_fast_assign( - __entry->got_bits = got_bits; - __entry->need_bits = need_bits; - __entry->pool_left = pool_left; - __entry->input_left = input_left; - ), - - TP_printk("got_bits %d still_needed_bits %d " - "blocking_pool_entropy_left %d input_entropy_left %d", - __entry->got_bits, __entry->got_bits, __entry->pool_left, - __entry->input_left) -); - TRACE_EVENT(urandom_read, TP_PROTO(int got_bits, int pool_left, int input_left), diff --git a/include/uapi/misc/uacce/hisi_qm.h b/include/uapi/misc/uacce/hisi_qm.h index 6435f0bcb556..1faef5ff87ef 100644 --- a/include/uapi/misc/uacce/hisi_qm.h +++ b/include/uapi/misc/uacce/hisi_qm.h @@ -16,6 +16,7 @@ struct hisi_qp_ctx { #define HISI_QM_API_VER_BASE "hisi_qm_v1" #define HISI_QM_API_VER2_BASE "hisi_qm_v2" +#define HISI_QM_API_VER3_BASE "hisi_qm_v3" /* UACCE_CMD_QM_SET_QP_CTX: Set qp algorithm type */ #define UACCE_CMD_QM_SET_QP_CTX _IOWR('H', 10, struct hisi_qp_ctx) diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c index 4ccbec442469..b748fd3d256e 100644 --- a/lib/crypto/chacha.c +++ b/lib/crypto/chacha.c @@ -64,7 +64,7 @@ static void chacha_permute(u32 *x, int nrounds) } /** - * chacha_block - generate one keystream block and increment block counter + * chacha_block_generic - generate one keystream block and increment block counter * @state: input state matrix (16 32-bit words) * @stream: output keystream block (64 bytes) * @nrounds: number of rounds (20 or 12; 20 is recommended) @@ -92,7 +92,7 @@ EXPORT_SYMBOL(chacha_block_generic); /** * hchacha_block_generic - abbreviated ChaCha core, for XChaCha * @state: input state matrix (16 32-bit words) - * @out: output (8 32-bit words) + * @stream: output (8 32-bit words) * @nrounds: number of rounds (20 or 12; 20 is recommended) * * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c index 3cc77d94390b..7fb71845cc84 100644 --- a/lib/crypto/poly1305-donna32.c +++ 
b/lib/crypto/poly1305-donna32.c
@@ -10,7 +10,8 @@
 #include
 #include
 
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
+void poly1305_core_setkey(struct poly1305_core_key *key,
+			  const u8 raw_key[POLY1305_BLOCK_SIZE])
 {
 	/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
 	key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
index 6ae181bb4345..d34cf4053668 100644
--- a/lib/crypto/poly1305-donna64.c
+++ b/lib/crypto/poly1305-donna64.c
@@ -12,7 +12,8 @@
 
 typedef __uint128_t u128;
 
-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
+void poly1305_core_setkey(struct poly1305_core_key *key,
+			  const u8 raw_key[POLY1305_BLOCK_SIZE])
 {
 	u64 t0, t1;
diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
index 9d2d14df0fee..26d87fc3823e 100644
--- a/lib/crypto/poly1305.c
+++ b/lib/crypto/poly1305.c
@@ -12,7 +12,8 @@
 #include
 #include
 
-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
+void poly1305_init_generic(struct poly1305_desc_ctx *desc,
+			   const u8 key[POLY1305_KEY_SIZE])
 {
 	poly1305_core_setkey(&desc->core_r, key);
 	desc->s[0] = get_unaligned_le32(key + 16);
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index f7ad43f28579..3dfaa836e7c5 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <linux/asn1.h>
 #include "oid_registry_data.c"
 
 MODULE_DESCRIPTION("OID Registry");
@@ -92,6 +93,29 @@ enum OID look_up_OID(const void *data, size_t datasize)
 }
 EXPORT_SYMBOL_GPL(look_up_OID);
 
+/**
+ * parse_OID - Parse an OID from a bytestream
+ * @data: Binary representation of the header + OID
+ * @datasize: Size of the binary representation
+ * @oid: Location to return the parsed OID
+ *
+ * Parse an OID from a bytestream that holds the OID in the format
+ * ASN1_OID | length | oid.  The length indicator must equal datasize - 2.
+ * -EBADMSG is returned if the bytestream is too short.
+ */
+int parse_OID(const void *data, size_t datasize, enum OID *oid)
+{
+	const unsigned char *v = data;
+
+	/* We need 2 bytes of header and at least 1 byte for the OID */
+	if (datasize < 3 || v[0] != ASN1_OID || v[1] != datasize - 2)
+		return -EBADMSG;
+
+	*oid = look_up_OID(data + 2, datasize - 2);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(parse_OID);
+
 /*
 * sprint_OID - Print an Object Identifier into a buffer
 * @data: The encoded OID to print
diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c
index 3226fe02e875..989401f116e9 100644
--- a/net/bluetooth/ecdh_helper.c
+++ b/net/bluetooth/ecdh_helper.c
@@ -126,8 +126,6 @@ int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32])
 	int err;
 	struct ecdh p = {0};
 
-	p.curve_id = ECC_CURVE_NIST_P256;
-
 	if (private_key) {
 		tmp = kmalloc(32, GFP_KERNEL);
 		if (!tmp)
diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c
index f71c6fa65fb3..f49604d44b87 100644
--- a/net/bluetooth/selftest.c
+++ b/net/bluetooth/selftest.c
@@ -205,7 +205,7 @@ static int __init test_ecdh(void)
 
 	calltime = ktime_get();
 
-	tfm = crypto_alloc_kpp("ecdh", 0, 0);
+	tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
 	if (IS_ERR(tfm)) {
 		BT_ERR("Unable to create ECDH crypto context");
 		err = PTR_ERR(tfm);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index b0c1ee110eff..21e445993f39 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1386,7 +1386,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
 		goto zfree_smp;
 	}
 
-	smp->tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+	smp->tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
 	if (IS_ERR(smp->tfm_ecdh)) {
 		BT_ERR("Unable to create ECDH crypto context");
 		goto free_shash;
@@ -3281,7 +3281,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
 		return ERR_CAST(tfm_cmac);
 	}
 
-	tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+	tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
 	if (IS_ERR(tfm_ecdh)) {
 		BT_ERR("Unable to create ECDH crypto context");
 		crypto_free_shash(tfm_cmac);
@@ -3806,7 +3806,7 @@ int __init bt_selftest_smp(void)
 		return PTR_ERR(tfm_cmac);
 	}
 
-	tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0);
+	tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
 	if (IS_ERR(tfm_ecdh)) {
 		BT_ERR("Unable to create ECDH crypto context");
 		crypto_free_shash(tfm_cmac);
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
index a662024b4c70..23240d793b07 100644
--- a/security/integrity/digsig_asymmetric.c
+++ b/security/integrity/digsig_asymmetric.c
@@ -84,6 +84,7 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 {
 	struct public_key_signature pks;
 	struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig;
+	const struct public_key *pk;
 	struct key *key;
 	int ret;
 
@@ -105,23 +106,20 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 	memset(&pks, 0, sizeof(pks));
 
 	pks.hash_algo = hash_algo_name[hdr->hash_algo];
-	switch (hdr->hash_algo) {
-	case HASH_ALGO_STREEBOG_256:
-	case HASH_ALGO_STREEBOG_512:
-		/* EC-RDSA and Streebog should go together. */
-		pks.pkey_algo = "ecrdsa";
-		pks.encoding = "raw";
-		break;
-	case HASH_ALGO_SM3_256:
-		/* SM2 and SM3 should go together. */
-		pks.pkey_algo = "sm2";
-		pks.encoding = "raw";
-		break;
-	default:
-		pks.pkey_algo = "rsa";
+
+	pk = asymmetric_key_public_key(key);
+	pks.pkey_algo = pk->pkey_algo;
+	if (!strcmp(pk->pkey_algo, "rsa"))
 		pks.encoding = "pkcs1";
-		break;
-	}
+	else if (!strncmp(pk->pkey_algo, "ecdsa-", 6))
+		/* ecdsa-nist-p192 etc.
*/ + pks.encoding = "x962"; + else if (!strcmp(pk->pkey_algo, "ecrdsa") || + !strcmp(pk->pkey_algo, "sm2")) + pks.encoding = "raw"; + else + return -ENOPKG; + pks.digest = (u8 *)data; pks.digest_size = datalen; pks.s = hdr->sig;
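
To make the new parse_OID() helper concrete: it expects a DER TLV whose tag
byte is ASN1_OID (0x06) and whose one-byte length equals datasize - 2, with
the encoded OID following.  The sketch below feeds it the DER encoding of
2.16.840.1.101.3.4.2.1, i.e. OID_sha256 (example_parse_sha256_oid() is a
hypothetical caller, not part of this series):

#include <linux/errno.h>
#include <linux/oid_registry.h>
#include <linux/types.h>

static int example_parse_sha256_oid(void)
{
	/*
	 * 0x06 = ASN1_OID tag, 0x09 = payload length (datasize - 2),
	 * then the nine encoded arcs of 2.16.840.1.101.3.4.2.1.
	 */
	static const u8 der[] = {
		0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65,
		0x03, 0x04, 0x02, 0x01,
	};
	enum OID oid;
	int ret;

	ret = parse_OID(der, sizeof(der), &oid);
	if (ret)
		return ret;

	return oid == OID_sha256 ? 0 : -EINVAL;
}
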