Merge tag 'v6.6-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto updates from Herbert Xu:
 "API:
   - Move crypto engine callback from tfm ctx into algorithm object
   - Fix atomic sleep bug in crypto_destroy_instance
   - Move lib/mpi into lib/crypto

  Algorithms:
   - Add chacha20 and poly1305 implementation for powerpc p10

  Drivers:
   - Add AES skcipher and aead support to starfive
   - Add Dynamic Boost Control support to ccp
   - Add support for STM32P13 platform to stm32"

* tag 'v6.6-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (149 commits)
  Revert "dt-bindings: crypto: qcom,prng: Add SM8450"
  crypto: chelsio - Remove unused declarations
  X.509: if signature is unsupported skip validation
  crypto: qat - fix crypto capability detection for 4xxx
  crypto: drivers - Explicitly include correct DT includes
  crypto: engine - Remove crypto_engine_ctx
  crypto: zynqmp - Use new crypto_engine_op interface
  crypto: virtio - Use new crypto_engine_op interface
  crypto: stm32 - Use new crypto_engine_op interface
  crypto: jh7110 - Use new crypto_engine_op interface
  crypto: rk3288 - Use new crypto_engine_op interface
  crypto: omap - Use new crypto_engine_op interface
  crypto: keembay - Use new crypto_engine_op interface
  crypto: sl3516 - Use new crypto_engine_op interface
  crypto: caam - Use new crypto_engine_op interface
  crypto: aspeed - Remove non-standard sha512 algorithms
  crypto: aspeed - Use new crypto_engine_op interface
  crypto: amlogic - Use new crypto_engine_op interface
  crypto: sun8i-ss - Use new crypto_engine_op interface
  crypto: sun8i-ce - Use new crypto_engine_op interface
  ...
Linus Torvalds 2023-08-29 11:23:29 -07:00
commit 68cf01760b
205 changed files with 10033 additions and 2915 deletions


@ -0,0 +1,61 @@
What: /sys/kernel/debug/qat_<device>_<BDF>/qat/fw_counters
Date: November 2023
KernelVersion: 6.6
Contact: qat-linux@intel.com
Description: (RO) Read returns the number of requests sent to the FW and the
number of responses received from the FW for each Acceleration Engine.
Reported firmware counters::
<N>: Number of requests sent from Acceleration Engine N to the FW,
and responses Acceleration Engine N received from the FW
What: /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/config
Date: November 2023
KernelVersion: 6.6
Contact: qat-linux@intel.com
Description: (RW) Read returns the value of the Heartbeat update period.
Writing to the file changes this period value.
This period should reflect the planned polling interval for device
health status. High-frequency Heartbeat monitoring wastes CPU cycles
but minimizes the customer's system downtime. Also, if there are
large service requests that take some time to complete, high-frequency
Heartbeat monitoring could result in false reports of unresponsiveness;
in those cases, the period needs to be increased.
This parameter is effective only for c3xxx, c62x and dh895xcc devices.
4xxx has this value internally fixed to 200ms.
The default value is 500; the minimal allowed value is 200.
All values are expressed in milliseconds.
What: /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/queries_failed
Date: November 2023
KernelVersion: 6.6
Contact: qat-linux@intel.com
Description: (RO) Read returns the number of times the device became unresponsive.
The attribute returns the value of a counter that is incremented
when a status query returns a negative result.
What: /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/queries_sent
Date: November 2023
KernelVersion: 6.6
Contact: qat-linux@intel.com
Description: (RO) Read returns the number of times the control process checked
whether the device is responsive.
The attribute returns the value of a counter that is incremented
on every status query.
What: /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/status
Date: November 2023
KernelVersion: 6.6
Contact: qat-linux@intel.com
Description: (RO) Read returns the device health status.
Returns 0 when the device is healthy, or -1 when it is unresponsive
or the query failed to send.
The driver does not monitor the Heartbeat on its own; it is left
to the user to poll the status periodically.
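
The status file is intended to be polled from user space. A minimal polling sketch in C (the debugfs path is illustrative; substitute the real <device> and <BDF> values, and note that debugfs normally requires root):

#include <stdio.h>

/* Hypothetical device path; adjust <device> and <BDF> for your system. */
#define HB_STATUS "/sys/kernel/debug/qat_4xxx_0000:6b:00.0/heartbeat/status"

/* Returns 1 if healthy, 0 if unresponsive, -1 if the file can't be read. */
static int qat_device_healthy(void)
{
	FILE *f = fopen(HB_STATUS, "r");
	int status = -1;
	int ok;

	if (!f)
		return -1;
	ok = (fscanf(f, "%d", &status) == 1);
	fclose(f);
	if (!ok)
		return -1;
	return status == 0;	/* 0 means healthy, -1 means unresponsive */
}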


@ -85,3 +85,21 @@ Description:
Possible values:
0: Not enforced
1: Enforced
What: /sys/bus/pci/devices/<BDF>/bootloader_version
Date: June 2023
KernelVersion: 6.4
Contact: mario.limonciello@amd.com
Description:
The /sys/bus/pci/devices/<BDF>/bootloader_version
file reports the firmware version of the AMD AGESA
bootloader.
What: /sys/bus/pci/devices/<BDF>/tee_version
Date: June 2023
KernelVersion: 6.4
Contact: mario.limonciello@amd.com
Description:
The /sys/bus/pci/devices/<BDF>/tee_version
file reports the firmware version of the AMD Trusted
Execution Environment (TEE).


@ -20,6 +20,7 @@ properties:
- stericsson,ux500-hash
- st,stm32f456-hash
- st,stm32f756-hash
- st,stm32mp13-hash
reg:
maxItems: 1


@ -915,6 +915,18 @@ S: Supported
F: drivers/crypto/ccp/sev*
F: include/uapi/linux/psp-sev.h
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - DBC SUPPORT
M: Mario Limonciello <mario.limonciello@amd.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/ccp/dbc.c
F: drivers/crypto/ccp/dbc.h
F: drivers/crypto/ccp/platform-access.c
F: drivers/crypto/ccp/platform-access.h
F: include/uapi/linux/psp-dbc.h
F: tools/crypto/ccp/*.c
F: tools/crypto/ccp/*.py
AMD DISPLAY CORE
M: Harry Wentland <harry.wentland@amd.com>
M: Leo Li <sunpeng.li@amd.com>


@ -81,11 +81,6 @@ aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o
aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)


@ -0,0 +1,2 @@
#define USE_V8_CRYPTO_EXTENSIONS
#include "aes-glue.c"


@ -0,0 +1 @@
#include "aes-glue.c"


@ -111,4 +111,30 @@ config CRYPTO_AES_GCM_P10
Support for cryptographic acceleration instructions on Power10 or
later CPU. This module supports stitched acceleration for AES/GCM.
config CRYPTO_CHACHA20_P10
tristate "Ciphers: ChaCha20, XChacha20, XChacha12 (P10 or later)"
depends on PPC64 && CPU_LITTLE_ENDIAN
select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
help
Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
stream cipher algorithms
Architecture: PowerPC64
- Power10 or later
- Little-endian
config CRYPTO_POLY1305_P10
tristate "Hash functions: Poly1305 (P10 or later)"
depends on PPC64 && CPU_LITTLE_ENDIAN
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305_GENERIC
help
Poly1305 authenticator algorithm (RFC7539)
Architecture: PowerPC64
- Power10 or later
- Little-endian
endmenu


@ -14,6 +14,8 @@ obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
md5-ppc-y := md5-asm.o md5-glue.o
@ -23,6 +25,8 @@ sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@


@ -0,0 +1,221 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers,
* including ChaCha20 (RFC7539)
*
* Copyright 2023- IBM Corp. All rights reserved.
*/
#include <crypto/algapi.h>
#include <crypto/internal/chacha.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/sizes.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src,
unsigned int len, int nrounds);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10);
static void vsx_begin(void)
{
preempt_disable();
enable_kernel_vsx();
}
static void vsx_end(void)
{
disable_kernel_vsx();
preempt_enable();
}
static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes, int nrounds)
{
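/* Carve off the largest whole multiple of 256 bytes for the P10 asm; any remainder (< 256 bytes) falls back to the generic code below. */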
unsigned int l = bytes & ~0x0FF;
if (l > 0) {
chacha_p10le_8x(state, dst, src, l, nrounds);
bytes -= l;
src += l;
dst += l;
state[12] += l / CHACHA_BLOCK_SIZE;
}
if (bytes > 0)
chacha_crypt_generic(state, dst, src, bytes, nrounds);
}
void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
{
hchacha_block_generic(state, stream, nrounds);
}
EXPORT_SYMBOL(hchacha_block_arch);
void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
{
chacha_init_generic(state, key, iv);
}
EXPORT_SYMBOL(chacha_init_arch);
void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
int nrounds)
{
if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE ||
!crypto_simd_usable())
return chacha_crypt_generic(state, dst, src, bytes, nrounds);
do {
unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
vsx_begin();
chacha_p10_do_8x(state, dst, src, todo, nrounds);
vsx_end();
bytes -= todo;
src += todo;
dst += todo;
} while (bytes);
}
EXPORT_SYMBOL(chacha_crypt_arch);
static int chacha_p10_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx, const u8 *iv)
{
struct skcipher_walk walk;
u32 state[16];
int err;
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
chacha_init_generic(state, ctx->key, iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
if (nbytes < walk.total)
nbytes = rounddown(nbytes, walk.stride);
if (!crypto_simd_usable()) {
chacha_crypt_generic(state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes,
ctx->nrounds);
} else {
vsx_begin();
chacha_p10_do_8x(state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes, ctx->nrounds);
vsx_end();
}
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
if (err)
break;
}
return err;
}
static int chacha_p10(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
return chacha_p10_stream_xor(req, ctx, req->iv);
}
static int xchacha_p10(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
struct chacha_ctx subctx;
u32 state[16];
u8 real_iv[16];
chacha_init_generic(state, ctx->key, req->iv);
hchacha_block_arch(state, subctx.key, ctx->nrounds);
subctx.nrounds = ctx->nrounds;
memcpy(&real_iv[0], req->iv + 24, 8);
memcpy(&real_iv[8], req->iv + 16, 8);
return chacha_p10_stream_xor(req, &subctx, real_iv);
}
static struct skcipher_alg algs[] = {
{
.base.cra_name = "chacha20",
.base.cra_driver_name = "chacha20-p10",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = chacha_p10,
.decrypt = chacha_p10,
}, {
.base.cra_name = "xchacha20",
.base.cra_driver_name = "xchacha20-p10",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = xchacha_p10,
.decrypt = xchacha_p10,
}, {
.base.cra_name = "xchacha12",
.base.cra_driver_name = "xchacha12-p10",
.base.cra_priority = 300,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha12_setkey,
.encrypt = xchacha_p10,
.decrypt = xchacha_p10,
}
};
static int __init chacha_p10_init(void)
{
static_branch_enable(&have_p10);
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
static void __exit chacha_p10_exit(void)
{
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init);
module_exit(chacha_p10_exit);
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-p10");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-p10");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-p10");


@ -0,0 +1,842 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#
# Accelerated chacha20 implementation for ppc64le.
#
# Copyright 2023- IBM Corp. All rights reserved
#
#===================================================================================
# Written by Danny Tsen <dtsen@us.ibm.com>
#
# chacha_p10le_8x(u32 *state, byte *dst, const byte *src,
# size_t len, int nrounds);
#
# do rounds, 8 quarter rounds
# 1. a += b; d ^= a; d <<<= 16;
# 2. c += d; b ^= c; b <<<= 12;
# 3. a += b; d ^= a; d <<<= 8;
# 4. c += d; b ^= c; b <<<= 7
#
# row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 16
# row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 12
# row1 = (row1 + row2), row4 = row1 xor row4, row4 rotate each word by 8
# row3 = (row3 + row4), row2 = row3 xor row2, row2 rotate each word by 7
#
# 4 blocks (a b c d)
#
# a0 b0 c0 d0
# a1 b1 c1 d1
# ...
# a4 b4 c4 d4
# ...
# a8 b8 c8 d8
# ...
# a12 b12 c12 d12
# a13 ...
# a14 ...
# a15 b15 c15 d15
#
# Column round (v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
# Diagonal round (v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
#
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>
#include <linux/linkage.h>
.machine "any"
.text
.macro SAVE_GPR GPR OFFSET FRAME
std \GPR,\OFFSET(\FRAME)
.endm
.macro SAVE_VRS VRS OFFSET FRAME
li 16, \OFFSET
stvx \VRS, 16, \FRAME
.endm
.macro SAVE_VSX VSX OFFSET FRAME
li 16, \OFFSET
stxvx \VSX, 16, \FRAME
.endm
.macro RESTORE_GPR GPR OFFSET FRAME
ld \GPR,\OFFSET(\FRAME)
.endm
.macro RESTORE_VRS VRS OFFSET FRAME
li 16, \OFFSET
lvx \VRS, 16, \FRAME
.endm
.macro RESTORE_VSX VSX OFFSET FRAME
li 16, \OFFSET
lxvx \VSX, 16, \FRAME
.endm
.macro SAVE_REGS
mflr 0
std 0, 16(1)
stdu 1,-752(1)
SAVE_GPR 14, 112, 1
SAVE_GPR 15, 120, 1
SAVE_GPR 16, 128, 1
SAVE_GPR 17, 136, 1
SAVE_GPR 18, 144, 1
SAVE_GPR 19, 152, 1
SAVE_GPR 20, 160, 1
SAVE_GPR 21, 168, 1
SAVE_GPR 22, 176, 1
SAVE_GPR 23, 184, 1
SAVE_GPR 24, 192, 1
SAVE_GPR 25, 200, 1
SAVE_GPR 26, 208, 1
SAVE_GPR 27, 216, 1
SAVE_GPR 28, 224, 1
SAVE_GPR 29, 232, 1
SAVE_GPR 30, 240, 1
SAVE_GPR 31, 248, 1
addi 9, 1, 256
SAVE_VRS 20, 0, 9
SAVE_VRS 21, 16, 9
SAVE_VRS 22, 32, 9
SAVE_VRS 23, 48, 9
SAVE_VRS 24, 64, 9
SAVE_VRS 25, 80, 9
SAVE_VRS 26, 96, 9
SAVE_VRS 27, 112, 9
SAVE_VRS 28, 128, 9
SAVE_VRS 29, 144, 9
SAVE_VRS 30, 160, 9
SAVE_VRS 31, 176, 9
SAVE_VSX 14, 192, 9
SAVE_VSX 15, 208, 9
SAVE_VSX 16, 224, 9
SAVE_VSX 17, 240, 9
SAVE_VSX 18, 256, 9
SAVE_VSX 19, 272, 9
SAVE_VSX 20, 288, 9
SAVE_VSX 21, 304, 9
SAVE_VSX 22, 320, 9
SAVE_VSX 23, 336, 9
SAVE_VSX 24, 352, 9
SAVE_VSX 25, 368, 9
SAVE_VSX 26, 384, 9
SAVE_VSX 27, 400, 9
SAVE_VSX 28, 416, 9
SAVE_VSX 29, 432, 9
SAVE_VSX 30, 448, 9
SAVE_VSX 31, 464, 9
.endm # SAVE_REGS
.macro RESTORE_REGS
addi 9, 1, 256
RESTORE_VRS 20, 0, 9
RESTORE_VRS 21, 16, 9
RESTORE_VRS 22, 32, 9
RESTORE_VRS 23, 48, 9
RESTORE_VRS 24, 64, 9
RESTORE_VRS 25, 80, 9
RESTORE_VRS 26, 96, 9
RESTORE_VRS 27, 112, 9
RESTORE_VRS 28, 128, 9
RESTORE_VRS 29, 144, 9
RESTORE_VRS 30, 160, 9
RESTORE_VRS 31, 176, 9
RESTORE_VSX 14, 192, 9
RESTORE_VSX 15, 208, 9
RESTORE_VSX 16, 224, 9
RESTORE_VSX 17, 240, 9
RESTORE_VSX 18, 256, 9
RESTORE_VSX 19, 272, 9
RESTORE_VSX 20, 288, 9
RESTORE_VSX 21, 304, 9
RESTORE_VSX 22, 320, 9
RESTORE_VSX 23, 336, 9
RESTORE_VSX 24, 352, 9
RESTORE_VSX 25, 368, 9
RESTORE_VSX 26, 384, 9
RESTORE_VSX 27, 400, 9
RESTORE_VSX 28, 416, 9
RESTORE_VSX 29, 432, 9
RESTORE_VSX 30, 448, 9
RESTORE_VSX 31, 464, 9
RESTORE_GPR 14, 112, 1
RESTORE_GPR 15, 120, 1
RESTORE_GPR 16, 128, 1
RESTORE_GPR 17, 136, 1
RESTORE_GPR 18, 144, 1
RESTORE_GPR 19, 152, 1
RESTORE_GPR 20, 160, 1
RESTORE_GPR 21, 168, 1
RESTORE_GPR 22, 176, 1
RESTORE_GPR 23, 184, 1
RESTORE_GPR 24, 192, 1
RESTORE_GPR 25, 200, 1
RESTORE_GPR 26, 208, 1
RESTORE_GPR 27, 216, 1
RESTORE_GPR 28, 224, 1
RESTORE_GPR 29, 232, 1
RESTORE_GPR 30, 240, 1
RESTORE_GPR 31, 248, 1
addi 1, 1, 752
ld 0, 16(1)
mtlr 0
.endm # RESTORE_REGS
.macro QT_loop_8x
# QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
xxlor 0, 32+25, 32+25
xxlor 32+25, 20, 20
vadduwm 0, 0, 4
vadduwm 1, 1, 5
vadduwm 2, 2, 6
vadduwm 3, 3, 7
vadduwm 16, 16, 20
vadduwm 17, 17, 21
vadduwm 18, 18, 22
vadduwm 19, 19, 23
vpermxor 12, 12, 0, 25
vpermxor 13, 13, 1, 25
vpermxor 14, 14, 2, 25
vpermxor 15, 15, 3, 25
vpermxor 28, 28, 16, 25
vpermxor 29, 29, 17, 25
vpermxor 30, 30, 18, 25
vpermxor 31, 31, 19, 25
xxlor 32+25, 0, 0
vadduwm 8, 8, 12
vadduwm 9, 9, 13
vadduwm 10, 10, 14
vadduwm 11, 11, 15
vadduwm 24, 24, 28
vadduwm 25, 25, 29
vadduwm 26, 26, 30
vadduwm 27, 27, 31
vxor 4, 4, 8
vxor 5, 5, 9
vxor 6, 6, 10
vxor 7, 7, 11
vxor 20, 20, 24
vxor 21, 21, 25
vxor 22, 22, 26
vxor 23, 23, 27
xxlor 0, 32+25, 32+25
xxlor 32+25, 21, 21
vrlw 4, 4, 25 #
vrlw 5, 5, 25
vrlw 6, 6, 25
vrlw 7, 7, 25
vrlw 20, 20, 25 #
vrlw 21, 21, 25
vrlw 22, 22, 25
vrlw 23, 23, 25
xxlor 32+25, 0, 0
vadduwm 0, 0, 4
vadduwm 1, 1, 5
vadduwm 2, 2, 6
vadduwm 3, 3, 7
vadduwm 16, 16, 20
vadduwm 17, 17, 21
vadduwm 18, 18, 22
vadduwm 19, 19, 23
xxlor 0, 32+25, 32+25
xxlor 32+25, 22, 22
vpermxor 12, 12, 0, 25
vpermxor 13, 13, 1, 25
vpermxor 14, 14, 2, 25
vpermxor 15, 15, 3, 25
vpermxor 28, 28, 16, 25
vpermxor 29, 29, 17, 25
vpermxor 30, 30, 18, 25
vpermxor 31, 31, 19, 25
xxlor 32+25, 0, 0
vadduwm 8, 8, 12
vadduwm 9, 9, 13
vadduwm 10, 10, 14
vadduwm 11, 11, 15
vadduwm 24, 24, 28
vadduwm 25, 25, 29
vadduwm 26, 26, 30
vadduwm 27, 27, 31
xxlor 0, 32+28, 32+28
xxlor 32+28, 23, 23
vxor 4, 4, 8
vxor 5, 5, 9
vxor 6, 6, 10
vxor 7, 7, 11
vxor 20, 20, 24
vxor 21, 21, 25
vxor 22, 22, 26
vxor 23, 23, 27
vrlw 4, 4, 28 #
vrlw 5, 5, 28
vrlw 6, 6, 28
vrlw 7, 7, 28
vrlw 20, 20, 28 #
vrlw 21, 21, 28
vrlw 22, 22, 28
vrlw 23, 23, 28
xxlor 32+28, 0, 0
# QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
xxlor 0, 32+25, 32+25
xxlor 32+25, 20, 20
vadduwm 0, 0, 5
vadduwm 1, 1, 6
vadduwm 2, 2, 7
vadduwm 3, 3, 4
vadduwm 16, 16, 21
vadduwm 17, 17, 22
vadduwm 18, 18, 23
vadduwm 19, 19, 20
vpermxor 15, 15, 0, 25
vpermxor 12, 12, 1, 25
vpermxor 13, 13, 2, 25
vpermxor 14, 14, 3, 25
vpermxor 31, 31, 16, 25
vpermxor 28, 28, 17, 25
vpermxor 29, 29, 18, 25
vpermxor 30, 30, 19, 25
xxlor 32+25, 0, 0
vadduwm 10, 10, 15
vadduwm 11, 11, 12
vadduwm 8, 8, 13
vadduwm 9, 9, 14
vadduwm 26, 26, 31
vadduwm 27, 27, 28
vadduwm 24, 24, 29
vadduwm 25, 25, 30
vxor 5, 5, 10
vxor 6, 6, 11
vxor 7, 7, 8
vxor 4, 4, 9
vxor 21, 21, 26
vxor 22, 22, 27
vxor 23, 23, 24
vxor 20, 20, 25
xxlor 0, 32+25, 32+25
xxlor 32+25, 21, 21
vrlw 5, 5, 25
vrlw 6, 6, 25
vrlw 7, 7, 25
vrlw 4, 4, 25
vrlw 21, 21, 25
vrlw 22, 22, 25
vrlw 23, 23, 25
vrlw 20, 20, 25
xxlor 32+25, 0, 0
vadduwm 0, 0, 5
vadduwm 1, 1, 6
vadduwm 2, 2, 7
vadduwm 3, 3, 4
vadduwm 16, 16, 21
vadduwm 17, 17, 22
vadduwm 18, 18, 23
vadduwm 19, 19, 20
xxlor 0, 32+25, 32+25
xxlor 32+25, 22, 22
vpermxor 15, 15, 0, 25
vpermxor 12, 12, 1, 25
vpermxor 13, 13, 2, 25
vpermxor 14, 14, 3, 25
vpermxor 31, 31, 16, 25
vpermxor 28, 28, 17, 25
vpermxor 29, 29, 18, 25
vpermxor 30, 30, 19, 25
xxlor 32+25, 0, 0
vadduwm 10, 10, 15
vadduwm 11, 11, 12
vadduwm 8, 8, 13
vadduwm 9, 9, 14
vadduwm 26, 26, 31
vadduwm 27, 27, 28
vadduwm 24, 24, 29
vadduwm 25, 25, 30
xxlor 0, 32+28, 32+28
xxlor 32+28, 23, 23
vxor 5, 5, 10
vxor 6, 6, 11
vxor 7, 7, 8
vxor 4, 4, 9
vxor 21, 21, 26
vxor 22, 22, 27
vxor 23, 23, 24
vxor 20, 20, 25
vrlw 5, 5, 28
vrlw 6, 6, 28
vrlw 7, 7, 28
vrlw 4, 4, 28
vrlw 21, 21, 28
vrlw 22, 22, 28
vrlw 23, 23, 28
vrlw 20, 20, 28
xxlor 32+28, 0, 0
.endm
.macro QT_loop_4x
# QR(v0, v4, v8, v12, v1, v5, v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
vadduwm 0, 0, 4
vadduwm 1, 1, 5
vadduwm 2, 2, 6
vadduwm 3, 3, 7
vpermxor 12, 12, 0, 20
vpermxor 13, 13, 1, 20
vpermxor 14, 14, 2, 20
vpermxor 15, 15, 3, 20
vadduwm 8, 8, 12
vadduwm 9, 9, 13
vadduwm 10, 10, 14
vadduwm 11, 11, 15
vxor 4, 4, 8
vxor 5, 5, 9
vxor 6, 6, 10
vxor 7, 7, 11
vrlw 4, 4, 21
vrlw 5, 5, 21
vrlw 6, 6, 21
vrlw 7, 7, 21
vadduwm 0, 0, 4
vadduwm 1, 1, 5
vadduwm 2, 2, 6
vadduwm 3, 3, 7
vpermxor 12, 12, 0, 22
vpermxor 13, 13, 1, 22
vpermxor 14, 14, 2, 22
vpermxor 15, 15, 3, 22
vadduwm 8, 8, 12
vadduwm 9, 9, 13
vadduwm 10, 10, 14
vadduwm 11, 11, 15
vxor 4, 4, 8
vxor 5, 5, 9
vxor 6, 6, 10
vxor 7, 7, 11
vrlw 4, 4, 23
vrlw 5, 5, 23
vrlw 6, 6, 23
vrlw 7, 7, 23
# QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7, v8, v13, v3, v4, v9, v14)
vadduwm 0, 0, 5
vadduwm 1, 1, 6
vadduwm 2, 2, 7
vadduwm 3, 3, 4
vpermxor 15, 15, 0, 20
vpermxor 12, 12, 1, 20
vpermxor 13, 13, 2, 20
vpermxor 14, 14, 3, 20
vadduwm 10, 10, 15
vadduwm 11, 11, 12
vadduwm 8, 8, 13
vadduwm 9, 9, 14
vxor 5, 5, 10
vxor 6, 6, 11
vxor 7, 7, 8
vxor 4, 4, 9
vrlw 5, 5, 21
vrlw 6, 6, 21
vrlw 7, 7, 21
vrlw 4, 4, 21
vadduwm 0, 0, 5
vadduwm 1, 1, 6
vadduwm 2, 2, 7
vadduwm 3, 3, 4
vpermxor 15, 15, 0, 22
vpermxor 12, 12, 1, 22
vpermxor 13, 13, 2, 22
vpermxor 14, 14, 3, 22
vadduwm 10, 10, 15
vadduwm 11, 11, 12
vadduwm 8, 8, 13
vadduwm 9, 9, 14
vxor 5, 5, 10
vxor 6, 6, 11
vxor 7, 7, 8
vxor 4, 4, 9
vrlw 5, 5, 23
vrlw 6, 6, 23
vrlw 7, 7, 23
vrlw 4, 4, 23
.endm
# Transpose
.macro TP_4x a0 a1 a2 a3
xxmrghw 10, 32+\a0, 32+\a1 # a0, a1, b0, b1
xxmrghw 11, 32+\a2, 32+\a3 # a2, a3, b2, b3
xxmrglw 12, 32+\a0, 32+\a1 # c0, c1, d0, d1
xxmrglw 13, 32+\a2, 32+\a3 # c2, c3, d2, d3
xxpermdi 32+\a0, 10, 11, 0 # a0, a1, a2, a3
xxpermdi 32+\a1, 10, 11, 3 # b0, b1, b2, b3
xxpermdi 32+\a2, 12, 13, 0 # c0, c1, c2, c3
xxpermdi 32+\a3, 12, 13, 3 # d0, d1, d2, d3
.endm
# key stream = working state + state
.macro Add_state S
vadduwm \S+0, \S+0, 16-\S
vadduwm \S+4, \S+4, 17-\S
vadduwm \S+8, \S+8, 18-\S
vadduwm \S+12, \S+12, 19-\S
vadduwm \S+1, \S+1, 16-\S
vadduwm \S+5, \S+5, 17-\S
vadduwm \S+9, \S+9, 18-\S
vadduwm \S+13, \S+13, 19-\S
vadduwm \S+2, \S+2, 16-\S
vadduwm \S+6, \S+6, 17-\S
vadduwm \S+10, \S+10, 18-\S
vadduwm \S+14, \S+14, 19-\S
vadduwm \S+3, \S+3, 16-\S
vadduwm \S+7, \S+7, 17-\S
vadduwm \S+11, \S+11, 18-\S
vadduwm \S+15, \S+15, 19-\S
.endm
#
# write 256 bytes
#
.macro Write_256 S
add 9, 14, 5
add 16, 14, 4
lxvw4x 0, 0, 9
lxvw4x 1, 17, 9
lxvw4x 2, 18, 9
lxvw4x 3, 19, 9
lxvw4x 4, 20, 9
lxvw4x 5, 21, 9
lxvw4x 6, 22, 9
lxvw4x 7, 23, 9
lxvw4x 8, 24, 9
lxvw4x 9, 25, 9
lxvw4x 10, 26, 9
lxvw4x 11, 27, 9
lxvw4x 12, 28, 9
lxvw4x 13, 29, 9
lxvw4x 14, 30, 9
lxvw4x 15, 31, 9
xxlxor \S+32, \S+32, 0
xxlxor \S+36, \S+36, 1
xxlxor \S+40, \S+40, 2
xxlxor \S+44, \S+44, 3
xxlxor \S+33, \S+33, 4
xxlxor \S+37, \S+37, 5
xxlxor \S+41, \S+41, 6
xxlxor \S+45, \S+45, 7
xxlxor \S+34, \S+34, 8
xxlxor \S+38, \S+38, 9
xxlxor \S+42, \S+42, 10
xxlxor \S+46, \S+46, 11
xxlxor \S+35, \S+35, 12
xxlxor \S+39, \S+39, 13
xxlxor \S+43, \S+43, 14
xxlxor \S+47, \S+47, 15
stxvw4x \S+32, 0, 16
stxvw4x \S+36, 17, 16
stxvw4x \S+40, 18, 16
stxvw4x \S+44, 19, 16
stxvw4x \S+33, 20, 16
stxvw4x \S+37, 21, 16
stxvw4x \S+41, 22, 16
stxvw4x \S+45, 23, 16
stxvw4x \S+34, 24, 16
stxvw4x \S+38, 25, 16
stxvw4x \S+42, 26, 16
stxvw4x \S+46, 27, 16
stxvw4x \S+35, 28, 16
stxvw4x \S+39, 29, 16
stxvw4x \S+43, 30, 16
stxvw4x \S+47, 31, 16
.endm
#
# chacha_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds);
#
SYM_FUNC_START(chacha_p10le_8x)
.align 5
cmpdi 6, 0
ble Out_no_chacha
SAVE_REGS
# r17 - r31 mainly for Write_256 macro.
li 17, 16
li 18, 32
li 19, 48
li 20, 64
li 21, 80
li 22, 96
li 23, 112
li 24, 128
li 25, 144
li 26, 160
li 27, 176
li 28, 192
li 29, 208
li 30, 224
li 31, 240
mr 15, 6 # len
li 14, 0 # offset to inp and outp
lxvw4x 48, 0, 3 # vr16, constants
lxvw4x 49, 17, 3 # vr17, key 1
lxvw4x 50, 18, 3 # vr18, key 2
lxvw4x 51, 19, 3 # vr19, counter, nonce
# create (0, 1, 2, 3) counters
vspltisw 0, 0
vspltisw 1, 1
vspltisw 2, 2
vspltisw 3, 3
vmrghw 4, 0, 1
vmrglw 5, 2, 3
vsldoi 30, 4, 5, 8 # vr30 counter, 4 (0, 1, 2, 3)
vspltisw 21, 12
vspltisw 23, 7
addis 11, 2, permx@toc@ha
addi 11, 11, permx@toc@l
lxvw4x 32+20, 0, 11
lxvw4x 32+22, 17, 11
sradi 8, 7, 1
mtctr 8
# save constants to vsx
xxlor 16, 48, 48
xxlor 17, 49, 49
xxlor 18, 50, 50
xxlor 19, 51, 51
vspltisw 25, 4
vspltisw 26, 8
xxlor 25, 32+26, 32+26
xxlor 24, 32+25, 32+25
vadduwm 31, 30, 25 # counter = (0, 1, 2, 3) + (4, 4, 4, 4)
xxlor 30, 32+30, 32+30
xxlor 31, 32+31, 32+31
xxlor 20, 32+20, 32+20
xxlor 21, 32+21, 32+21
xxlor 22, 32+22, 32+22
xxlor 23, 32+23, 32+23
cmpdi 6, 512
blt Loop_last
Loop_8x:
xxspltw 32+0, 16, 0
xxspltw 32+1, 16, 1
xxspltw 32+2, 16, 2
xxspltw 32+3, 16, 3
xxspltw 32+4, 17, 0
xxspltw 32+5, 17, 1
xxspltw 32+6, 17, 2
xxspltw 32+7, 17, 3
xxspltw 32+8, 18, 0
xxspltw 32+9, 18, 1
xxspltw 32+10, 18, 2
xxspltw 32+11, 18, 3
xxspltw 32+12, 19, 0
xxspltw 32+13, 19, 1
xxspltw 32+14, 19, 2
xxspltw 32+15, 19, 3
vadduwm 12, 12, 30 # increase counter
xxspltw 32+16, 16, 0
xxspltw 32+17, 16, 1
xxspltw 32+18, 16, 2
xxspltw 32+19, 16, 3
xxspltw 32+20, 17, 0
xxspltw 32+21, 17, 1
xxspltw 32+22, 17, 2
xxspltw 32+23, 17, 3
xxspltw 32+24, 18, 0
xxspltw 32+25, 18, 1
xxspltw 32+26, 18, 2
xxspltw 32+27, 18, 3
xxspltw 32+28, 19, 0
xxspltw 32+29, 19, 1
vadduwm 28, 28, 31 # increase counter
xxspltw 32+30, 19, 2
xxspltw 32+31, 19, 3
.align 5
quarter_loop_8x:
QT_loop_8x
bdnz quarter_loop_8x
xxlor 0, 32+30, 32+30
xxlor 32+30, 30, 30
vadduwm 12, 12, 30
xxlor 32+30, 0, 0
TP_4x 0, 1, 2, 3
TP_4x 4, 5, 6, 7
TP_4x 8, 9, 10, 11
TP_4x 12, 13, 14, 15
xxlor 0, 48, 48
xxlor 1, 49, 49
xxlor 2, 50, 50
xxlor 3, 51, 51
xxlor 48, 16, 16
xxlor 49, 17, 17
xxlor 50, 18, 18
xxlor 51, 19, 19
Add_state 0
xxlor 48, 0, 0
xxlor 49, 1, 1
xxlor 50, 2, 2
xxlor 51, 3, 3
Write_256 0
addi 14, 14, 256 # offset +=256
addi 15, 15, -256 # len -=256
xxlor 5, 32+31, 32+31
xxlor 32+31, 31, 31
vadduwm 28, 28, 31
xxlor 32+31, 5, 5
TP_4x 16+0, 16+1, 16+2, 16+3
TP_4x 16+4, 16+5, 16+6, 16+7
TP_4x 16+8, 16+9, 16+10, 16+11
TP_4x 16+12, 16+13, 16+14, 16+15
xxlor 32, 16, 16
xxlor 33, 17, 17
xxlor 34, 18, 18
xxlor 35, 19, 19
Add_state 16
Write_256 16
addi 14, 14, 256 # offset +=256
addi 15, 15, -256 # len -= 256
xxlor 32+24, 24, 24
xxlor 32+25, 25, 25
xxlor 32+30, 30, 30
vadduwm 30, 30, 25
vadduwm 31, 30, 24
xxlor 30, 32+30, 32+30
xxlor 31, 32+31, 32+31
cmpdi 15, 0
beq Out_loop
cmpdi 15, 512
blt Loop_last
mtctr 8
b Loop_8x
Loop_last:
lxvw4x 48, 0, 3 # vr16, constants
lxvw4x 49, 17, 3 # vr17, key 1
lxvw4x 50, 18, 3 # vr18, key 2
lxvw4x 51, 19, 3 # vr19, counter, nonce
vspltisw 21, 12
vspltisw 23, 7
addis 11, 2, permx@toc@ha
addi 11, 11, permx@toc@l
lxvw4x 32+20, 0, 11
lxvw4x 32+22, 17, 11
sradi 8, 7, 1
mtctr 8
Loop_4x:
vspltw 0, 16, 0
vspltw 1, 16, 1
vspltw 2, 16, 2
vspltw 3, 16, 3
vspltw 4, 17, 0
vspltw 5, 17, 1
vspltw 6, 17, 2
vspltw 7, 17, 3
vspltw 8, 18, 0
vspltw 9, 18, 1
vspltw 10, 18, 2
vspltw 11, 18, 3
vspltw 12, 19, 0
vadduwm 12, 12, 30 # increase counter
vspltw 13, 19, 1
vspltw 14, 19, 2
vspltw 15, 19, 3
.align 5
quarter_loop:
QT_loop_4x
bdnz quarter_loop
vadduwm 12, 12, 30
TP_4x 0, 1, 2, 3
TP_4x 4, 5, 6, 7
TP_4x 8, 9, 10, 11
TP_4x 12, 13, 14, 15
Add_state 0
Write_256 0
addi 14, 14, 256 # offset += 256
addi 15, 15, -256 # len -= 256
# Update state counter
vspltisw 25, 4
vadduwm 30, 30, 25
cmpdi 15, 0
beq Out_loop
cmpdi 15, 256
blt Out_loop
mtctr 8
b Loop_4x
Out_loop:
RESTORE_REGS
blr
Out_no_chacha:
li 3, 0
blr
SYM_FUNC_END(chacha_p10le_8x)
SYM_DATA_START_LOCAL(PERMX)
.align 5
permx:
.long 0x22330011, 0x66774455, 0xaabb8899, 0xeeffccdd
.long 0x11223300, 0x55667744, 0x99aabb88, 0xddeeffcc
SYM_DATA_END(PERMX)
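
For reference, the quarter round that the vadduwm/vpermxor/vxor/vrlw sequences above evaluate a whole vector of lanes at a time looks like this in scalar C (a restatement of the 16/12/8/7 rotation schedule from the header comment; not part of the patch):

#include <stdint.h>

static inline uint32_t rotl32(uint32_t v, int n)
{
	return (v << n) | (v >> (32 - n));
}

/* One ChaCha quarter round over four 32-bit state words. */
static void chacha_quarter_round(uint32_t *a, uint32_t *b,
				 uint32_t *c, uint32_t *d)
{
	*a += *b; *d ^= *a; *d = rotl32(*d, 16);
	*c += *d; *b ^= *c; *b = rotl32(*b, 12);
	*a += *b; *d ^= *a; *d = rotl32(*d, 8);
	*c += *d; *b ^= *c; *b = rotl32(*b, 7);
}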


@ -0,0 +1,186 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Poly1305 authenticator algorithm, RFC7539.
*
* Copyright 2023- IBM Corp. All rights reserved.
*/
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jump_label.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/simd.h>
#include <linux/cpufeature.h>
#include <asm/unaligned.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen);
asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit);
asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst);
static void vsx_begin(void)
{
preempt_disable();
enable_kernel_vsx();
}
static void vsx_end(void)
{
disable_kernel_vsx();
preempt_enable();
}
static int crypto_poly1305_p10_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
poly1305_core_init(&dctx->h);
dctx->buflen = 0;
dctx->rset = 0;
dctx->sset = false;
return 0;
}
static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
const u8 *inp, unsigned int len)
{
unsigned int acc = 0;
if (unlikely(!dctx->sset)) {
if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
struct poly1305_core_key *key = &dctx->core_r;
key->key.r64[0] = get_unaligned_le64(&inp[0]);
key->key.r64[1] = get_unaligned_le64(&inp[8]);
inp += POLY1305_BLOCK_SIZE;
len -= POLY1305_BLOCK_SIZE;
acc += POLY1305_BLOCK_SIZE;
dctx->rset = 1;
}
if (len >= POLY1305_BLOCK_SIZE) {
dctx->s[0] = get_unaligned_le32(&inp[0]);
dctx->s[1] = get_unaligned_le32(&inp[4]);
dctx->s[2] = get_unaligned_le32(&inp[8]);
dctx->s[3] = get_unaligned_le32(&inp[12]);
acc += POLY1305_BLOCK_SIZE;
dctx->sset = true;
}
}
return acc;
}
static int crypto_poly1305_p10_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int bytes, used;
if (unlikely(dctx->buflen)) {
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
memcpy(dctx->buf + dctx->buflen, src, bytes);
src += bytes;
srclen -= bytes;
dctx->buflen += bytes;
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf,
POLY1305_BLOCK_SIZE))) {
vsx_begin();
poly1305_64s(&dctx->h, dctx->buf,
POLY1305_BLOCK_SIZE, 1);
vsx_end();
}
dctx->buflen = 0;
}
}
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
used = crypto_poly1305_setdctxkey(dctx, src, bytes);
if (likely(used)) {
srclen -= used;
src += used;
}
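/* Hand all whole 4-block (64-byte) multiples to the P10 VSX routine; the remaining tail is consumed by the single-block loop below. */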
if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) {
vsx_begin();
poly1305_p10le_4blocks(&dctx->h, src, srclen);
vsx_end();
src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4));
srclen %= POLY1305_BLOCK_SIZE * 4;
}
while (srclen >= POLY1305_BLOCK_SIZE) {
vsx_begin();
poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1);
vsx_end();
srclen -= POLY1305_BLOCK_SIZE;
src += POLY1305_BLOCK_SIZE;
}
}
if (unlikely(srclen)) {
dctx->buflen = srclen;
memcpy(dctx->buf, src, srclen);
}
return 0;
}
static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
if (unlikely(!dctx->sset))
return -ENOKEY;
if (dctx->buflen) {
dctx->buf[dctx->buflen++] = 1;
memset(dctx->buf + dctx->buflen, 0,
POLY1305_BLOCK_SIZE - dctx->buflen);
vsx_begin();
poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
vsx_end();
dctx->buflen = 0;
}
poly1305_emit_64(&dctx->h, &dctx->s, dst);
return 0;
}
static struct shash_alg poly1305_alg = {
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_poly1305_p10_init,
.update = crypto_poly1305_p10_update,
.final = crypto_poly1305_p10_final,
.descsize = sizeof(struct poly1305_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-p10",
.cra_priority = 300,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
};
static int __init poly1305_p10_init(void)
{
return crypto_register_shash(&poly1305_alg);
}
static void __exit poly1305_p10_exit(void)
{
crypto_unregister_shash(&poly1305_alg);
}
module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init);
module_exit(poly1305_p10_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
MODULE_DESCRIPTION("Optimized Poly1305 for P10");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-p10");

(File diff suppressed because it is too large.)


@ -229,10 +229,9 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
return (struct crypto_aes_ctx *)ALIGN(addr, align);
}
static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
static int aes_set_key_common(struct crypto_aes_ctx *ctx,
const u8 *in_key, unsigned int key_len)
{
struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
int err;
if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
@ -253,7 +252,8 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
return aes_set_key_common(aes_ctx(crypto_tfm_ctx(tfm)), in_key,
key_len);
}
static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@ -285,8 +285,7 @@ static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
return aes_set_key_common(crypto_skcipher_tfm(tfm),
crypto_skcipher_ctx(tfm), key, len);
return aes_set_key_common(aes_ctx(crypto_skcipher_ctx(tfm)), key, len);
}
static int ecb_encrypt(struct skcipher_request *req)
@ -627,8 +626,7 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
return aes_set_key_common(crypto_aead_tfm(aead),
&ctx->aes_key_expanded, key, key_len) ?:
return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}
@ -893,14 +891,13 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
keylen /= 2;
/* first half of xts-key is for crypt */
err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
key, keylen);
err = aes_set_key_common(aes_ctx(ctx->raw_crypt_ctx), key, keylen);
if (err)
return err;
/* second half of xts-key is for tweak */
return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
key + keylen, keylen);
return aes_set_key_common(aes_ctx(ctx->raw_tweak_ctx), key + keylen,
keylen);
}
static int xts_crypt(struct skcipher_request *req, bool encrypt)
@ -1150,8 +1147,7 @@ static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
{
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
return aes_set_key_common(crypto_aead_tfm(aead),
&ctx->aes_key_expanded, key, key_len) ?:
return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}


@ -320,18 +320,21 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
if (IS_ERR(ret)) {
up_read(&key->sem);
key_put(key);
return PTR_ERR(ret);
}
key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
if (!key_data) {
up_read(&key->sem);
key_put(key);
return -ENOMEM;
}
memcpy(key_data, ret, key_datalen);
up_read(&key->sem);
key_put(key);
err = type->setkey(ask->private, key_data, key_datalen);
@ -1192,6 +1195,7 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
areq->areqlen = areqlen;
areq->sk = sk;
areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
areq->last_rsgl = NULL;
INIT_LIST_HEAD(&areq->rsgl_list);
areq->tsgl = NULL;


@ -17,6 +17,7 @@
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include "internal.h"
@ -74,15 +75,26 @@ static void crypto_free_instance(struct crypto_instance *inst)
inst->alg.cra_type->free(inst);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
static void crypto_destroy_instance_workfn(struct work_struct *w)
{
struct crypto_instance *inst = (void *)alg;
struct crypto_instance *inst = container_of(w, struct crypto_instance,
free_work);
struct crypto_template *tmpl = inst->tmpl;
crypto_free_instance(inst);
crypto_tmpl_put(tmpl);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
{
struct crypto_instance *inst = container_of(alg,
struct crypto_instance,
alg);
INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
schedule_work(&inst->free_work);
}
/*
* This function adds a spawn to the list secondary_spawns which
* will be used at the end of crypto_remove_spawns to unregister


@ -42,7 +42,7 @@ static void public_key_describe(const struct key *asymmetric_key,
void public_key_free(struct public_key *key)
{
if (key) {
kfree(key->key);
kfree_sensitive(key->key);
kfree(key->params);
kfree(key);
}
@ -263,7 +263,7 @@ error_free_tfm:
else
crypto_free_akcipher(tfm);
error_free_key:
kfree(key);
kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
@ -369,7 +369,7 @@ error_free_tfm:
else
crypto_free_akcipher(tfm);
error_free_key:
kfree(key);
kfree_sensitive(key);
pr_devel("<==%s() = %d\n", __func__, ret);
return ret;
}
@ -441,7 +441,7 @@ int public_key_verify_signature(const struct public_key *pkey,
sig->digest, sig->digest_size);
error_free_key:
kfree(key);
kfree_sensitive(key);
error_free_tfm:
crypto_free_sig(tfm);
pr_devel("<==%s() = %d\n", __func__, ret);


@ -391,7 +391,7 @@ error_no_desc:
* verify_pefile_signature - Verify the signature on a PE binary image
* @pebuf: Buffer containing the PE binary image
* @pelen: Length of the binary image
* @trust_keys: Signing certificate(s) to use as starting points
* @trusted_keys: Signing certificate(s) to use as starting points
* @usage: The use to which the key is being put.
*
* Validate that the certificate chain inside the PKCS#7 message inside the PE


@ -130,6 +130,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
goto out;
}
if (cert->unsupported_sig) {
ret = 0;
goto out;
}
ret = public_key_verify_signature(cert->pub, cert->sig);
if (ret < 0) {
if (ret == -ENOPKG) {


@ -7,15 +7,30 @@
* Author: Baolin Wang <baolin.wang@linaro.org>
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
/* Temporary algorithm flag used to indicate an updated driver. */
#define CRYPTO_ALG_ENGINE 0x200
struct crypto_engine_alg {
struct crypto_alg base;
struct crypto_engine_op op;
};
/**
* crypto_finalize_request - finalize one request if the request is done
* @engine: the hardware engine
@ -26,9 +41,6 @@ static void crypto_finalize_request(struct crypto_engine *engine,
struct crypto_async_request *req, int err)
{
unsigned long flags;
bool finalize_req = false;
int ret;
struct crypto_engine_ctx *enginectx;
/*
* If hardware cannot enqueue more requests
@ -38,21 +50,11 @@ static void crypto_finalize_request(struct crypto_engine *engine,
if (!engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req) {
finalize_req = true;
engine->cur_req = NULL;
}
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
if (finalize_req || engine->retry_support) {
enginectx = crypto_tfm_ctx(req->tfm);
if (enginectx->op.prepare_request &&
enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine, req);
if (ret)
dev_err(engine->dev, "failed to unprepare request\n");
}
}
lockdep_assert_in_softirq();
crypto_request_complete(req, err);
@ -72,10 +74,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
struct crypto_engine_alg *alg;
struct crypto_engine_op *op;
unsigned long flags;
bool was_busy = false;
int ret;
struct crypto_engine_ctx *enginectx;
spin_lock_irqsave(&engine->queue_lock, flags);
@ -141,27 +144,21 @@ start_request:
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
dev_err(engine->dev, "failed to prepare crypt hardware\n");
goto req_err_2;
goto req_err_1;
}
}
enginectx = crypto_tfm_ctx(async_req->tfm);
if (enginectx->op.prepare_request) {
ret = enginectx->op.prepare_request(engine, async_req);
if (ret) {
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
goto req_err_2;
}
}
if (!enginectx->op.do_one_request) {
if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
alg = container_of(async_req->tfm->__crt_alg,
struct crypto_engine_alg, base);
op = &alg->op;
} else {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
goto req_err_1;
}
ret = enginectx->op.do_one_request(engine, async_req);
ret = op->do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
if (ret < 0) {
@ -177,18 +174,6 @@ start_request:
ret);
goto req_err_1;
}
/*
* If retry mechanism is supported,
* unprepare current request and
* enqueue it back into crypto-engine queue.
*/
if (enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine,
async_req);
if (ret)
dev_err(engine->dev,
"failed to unprepare request\n");
}
spin_lock_irqsave(&engine->queue_lock, flags);
/*
* If hardware was unable to execute request, enqueue it
@ -204,13 +189,6 @@ start_request:
goto retry;
req_err_1:
if (enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine, async_req);
if (ret)
dev_err(engine->dev, "failed to unprepare request\n");
}
req_err_2:
crypto_request_complete(async_req, ret);
retry:
@ -591,5 +569,177 @@ int crypto_engine_exit(struct crypto_engine *engine)
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_aead(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_aeads(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_ahash(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_ahashes(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);
int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
return crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_skcipher(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_skciphers(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");


@ -89,10 +89,14 @@ struct rand_data {
unsigned int rct_count; /* Number of stuck values */
/* Intermittent health test failure threshold of 2^-30 */
#define JENT_RCT_CUTOFF 30 /* Taken from SP800-90B sec 4.4.1 */
#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
/* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */
/* However, our RCT implementation starts at 1, so we subtract 1 here. */
#define JENT_RCT_CUTOFF (31 - 1) /* Taken from SP800-90B sec 4.4.1 */
#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
/* Permanent health test failure threshold of 2^-60 */
#define JENT_RCT_CUTOFF_PERMANENT 60
/* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */
/* However, our RCT implementation starts at 1, so we subtract 1 here. */
#define JENT_RCT_CUTOFF_PERMANENT (61 - 1)
#define JENT_APT_CUTOFF_PERMANENT 355
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */


@ -357,10 +357,10 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
* cipher name.
*/
if (!strncmp(cipher_name, "ecb(", 4)) {
unsigned len;
int len;
len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
if (len < 2 || len >= sizeof(ecb_name))
len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
if (len < 2)
goto err_free_inst;
if (ecb_name[len - 1] != ')')


@ -21,11 +21,6 @@
static const struct crypto_type crypto_sig_type;
static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
{
return container_of(tfm, struct crypto_sig, base);
}
static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
{
if (tfm->__crt_alg->cra_type != &crypto_sig_type)


@ -396,10 +396,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
* cipher name.
*/
if (!strncmp(cipher_name, "ecb(", 4)) {
unsigned len;
int len;
len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
if (len < 2 || len >= sizeof(ctx->name))
len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
if (len < 2)
goto err_free_inst;
if (ctx->name[len - 1] != ')')
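
The strlcpy() to strscpy() switch in the lrw and xts hunks above is not only cosmetic: strlcpy() returns the length of the source string, so truncation had to be caught with the extra `len >= sizeof(...)` test, while strscpy() returns the number of characters copied or -E2BIG on truncation, letting a single `len < 2` check reject both too-short and truncated names. A simplified model of the return convention (my sketch, not the kernel implementation):

#include <errno.h>
#include <string.h>

/* Simplified model of the kernel's strscpy() return convention. */
static long strscpy_model(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {		/* source would not fit with its NUL */
		if (size) {
			memcpy(dst, src, size - 1);
			dst[size - 1] = '\0';
		}
		return -E2BIG;		/* truncation reported as an error */
	}
	memcpy(dst, src, len + 1);
	return (long)len;		/* otherwise: bytes copied, excluding NUL */
}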


@ -37,7 +37,7 @@ config HW_RANDOM_TIMERIOMEM
config HW_RANDOM_INTEL
tristate "Intel HW Random Number Generator support"
depends on (X86 || IA64) && PCI
depends on (X86 || IA64 || COMPILE_TEST) && PCI
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -50,7 +50,8 @@ config HW_RANDOM_INTEL
config HW_RANDOM_AMD
tristate "AMD HW Random Number Generator support"
depends on (X86 || PPC_MAPLE) && PCI
depends on (X86 || PPC_MAPLE || COMPILE_TEST)
depends on PCI && HAS_IOPORT_MAP
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -63,7 +64,7 @@ config HW_RANDOM_AMD
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
depends on (ARCH_AT91 || COMPILE_TEST) && HAVE_CLK && OF
depends on (ARCH_AT91 || COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -113,7 +114,8 @@ config HW_RANDOM_IPROC_RNG200
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
depends on X86_32 && PCI
depends on (X86_32 || COMPILE_TEST)
depends on PCI
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -205,7 +207,7 @@ config HW_RANDOM_OCTEON
config HW_RANDOM_PASEMI
tristate "PA Semi HW Random Number Generator support"
depends on PPC_PASEMI
depends on PPC_PASEMI || (PPC && COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -228,7 +230,7 @@ config HW_RANDOM_VIRTIO
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
depends on SOC_IMX31
depends on SOC_IMX31 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -241,7 +243,7 @@ config HW_RANDOM_MXC_RNGA
config HW_RANDOM_IMX_RNGC
tristate "Freescale i.MX RNGC Random Number Generator"
depends on HAS_IOMEM && HAVE_CLK
depends on HAS_IOMEM
depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST
default HW_RANDOM
help
@ -256,8 +258,7 @@ config HW_RANDOM_IMX_RNGC
config HW_RANDOM_INGENIC_RNG
tristate "Ingenic Random Number Generator support"
depends on HW_RANDOM
depends on MACH_JZ4780 || MACH_X1000
depends on MACH_JZ4780 || MACH_X1000 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number Generator
@ -271,8 +272,7 @@ config HW_RANDOM_INGENIC_RNG
config HW_RANDOM_INGENIC_TRNG
tristate "Ingenic True Random Number Generator support"
depends on HW_RANDOM
depends on MACH_X1830
depends on MACH_X1830 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the True Random Number Generator
@ -324,7 +324,7 @@ config HW_RANDOM_POWERNV
config HW_RANDOM_HISI
tristate "Hisilicon Random Number Generator support"
depends on HW_RANDOM && ARCH_HISI
depends on ARCH_HISI || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -348,7 +348,7 @@ config HW_RANDOM_HISTB
config HW_RANDOM_ST
tristate "ST Microelectronics HW Random Number Generator support"
depends on HW_RANDOM && (ARCH_STI || COMPILE_TEST)
depends on ARCH_STI || COMPILE_TEST
help
This driver provides kernel-side support for the Random Number
Generator hardware found on STi series of SoCs.
@ -358,7 +358,7 @@ config HW_RANDOM_ST
config HW_RANDOM_XGENE
tristate "APM X-Gene True Random Number Generator (TRNG) support"
depends on HW_RANDOM && ARCH_XGENE
depends on ARCH_XGENE || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@ -371,7 +371,7 @@ config HW_RANDOM_XGENE
config HW_RANDOM_STM32
tristate "STMicroelectronics STM32 random number generator"
depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
depends on ARCH_STM32 || COMPILE_TEST
depends on HAS_IOMEM
default HW_RANDOM
help
@ -385,8 +385,8 @@ config HW_RANDOM_STM32
config HW_RANDOM_PIC32
tristate "Microchip PIC32 Random Number Generator support"
depends on HW_RANDOM && MACH_PIC32
default y
depends on MACH_PIC32 || COMPILE_TEST
default HW_RANDOM if MACH_PIC32
help
This driver provides kernel-side support for the Random Number
Generator hardware found on a PIC32.
@ -425,7 +425,8 @@ config HW_RANDOM_MESON
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
depends on HW_RANDOM && PCI && ARCH_THUNDER
depends on PCI
depends on ARCH_THUNDER || (ARM64 && COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number


@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>


@ -105,8 +105,6 @@ static int smccc_trng_probe(struct platform_device *pdev)
trng->name = "smccc_trng";
trng->read = smccc_trng_read;
platform_set_drvdata(pdev, trng);
return devm_hwrng_register(&pdev->dev, trng);
}


@ -15,7 +15,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/hw_random.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>


@ -189,13 +189,9 @@ static int ba431_trng_probe(struct platform_device *pdev)
ba431->rng.cleanup = ba431_trng_cleanup;
ba431->rng.read = ba431_trng_read;
platform_set_drvdata(pdev, ba431);
ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
if (ret) {
dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret, "BA431 registration failed\n");
dev_info(&pdev->dev, "BA431 TRNG registered\n");
@ -203,7 +199,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
}
static const struct of_device_id ba431_trng_dt_ids[] = {
{ .compatible = "silex-insight,ba431-rng", .data = NULL },
{ .compatible = "silex-insight,ba431-rng" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids);


@ -8,8 +8,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/clk.h>


@ -455,35 +455,6 @@ static void cc_trng_startwork_handler(struct work_struct *w)
cc_trng_hw_trigger(drvdata);
}
static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
{
struct clk *clk;
struct device *dev = &(drvdata->pdev->dev);
int rc = 0;
clk = devm_clk_get_optional(dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(dev, PTR_ERR(clk),
"Error getting clock\n");
drvdata->clk = clk;
rc = clk_prepare_enable(drvdata->clk);
if (rc) {
dev_err(dev, "Failed to enable clock\n");
return rc;
}
return 0;
}
static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
{
clk_disable_unprepare(drvdata->clk);
}
static int cctrng_probe(struct platform_device *pdev)
{
struct cctrng_drvdata *drvdata;
@ -492,6 +463,10 @@ static int cctrng_probe(struct platform_device *pdev)
u32 val;
int irq;
/* Compile time assertion checks */
BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
@ -510,10 +485,8 @@ static int cctrng_probe(struct platform_device *pdev)
drvdata->circ.buf = (char *)drvdata->data_buf;
drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drvdata->cc_base)) {
dev_err(dev, "Failed to ioremap registers");
return PTR_ERR(drvdata->cc_base);
}
if (IS_ERR(drvdata->cc_base))
return dev_err_probe(dev, PTR_ERR(drvdata->cc_base), "Failed to ioremap registers");
/* Then IRQ */
irq = platform_get_irq(pdev, 0);
@ -522,16 +495,13 @@ static int cctrng_probe(struct platform_device *pdev)
/* parse sampling rate from device tree */
rc = cc_trng_parse_sampling_ratio(drvdata);
if (rc) {
dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
return rc;
}
if (rc)
return dev_err_probe(dev, rc, "Failed to get legal sampling ratio for rosc\n");
rc = cc_trng_clk_init(drvdata);
if (rc) {
dev_err(dev, "cc_trng_clk_init failed\n");
return rc;
}
drvdata->clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(drvdata->clk))
return dev_err_probe(dev, PTR_ERR(drvdata->clk),
"Failed to get or enable the clock\n");
INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
@ -539,10 +509,8 @@ static int cctrng_probe(struct platform_device *pdev)
/* register the driver isr function */
rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
if (rc) {
dev_err(dev, "Could not register to interrupt %d\n", irq);
goto post_clk_err;
}
if (rc)
return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq);
dev_dbg(dev, "Registered to IRQ: %d\n", irq);
/* Clear all pending interrupts */
@ -557,17 +525,13 @@ static int cctrng_probe(struct platform_device *pdev)
/* init PM */
rc = cc_trng_pm_init(drvdata);
if (rc) {
dev_err(dev, "cc_trng_pm_init failed\n");
goto post_clk_err;
}
if (rc)
return dev_err_probe(dev, rc, "cc_trng_pm_init failed\n");
/* increment device's usage counter */
rc = cc_trng_pm_get(dev);
if (rc) {
dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
goto post_pm_err;
}
if (rc)
return dev_err_probe(dev, rc, "cc_trng_pm_get returned %x\n", rc);
/* set pending_hw to verify that HW won't be triggered from read */
atomic_set(&drvdata->pending_hw, 1);
@ -593,9 +557,6 @@ static int cctrng_probe(struct platform_device *pdev)
post_pm_err:
cc_trng_pm_fini(drvdata);
post_clk_err:
cc_trng_clk_fini(drvdata);
return rc;
}
@ -608,8 +569,6 @@ static int cctrng_remove(struct platform_device *pdev)
cc_trng_pm_fini(drvdata);
cc_trng_clk_fini(drvdata);
dev_info(dev, "ARM cctrng device terminated\n");
return 0;
@ -698,21 +657,7 @@ static struct platform_driver cctrng_driver = {
.remove = cctrng_remove,
};
static int __init cctrng_mod_init(void)
{
/* Compile time assertion checks */
BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
return platform_driver_register(&cctrng_driver);
}
module_init(cctrng_mod_init);
static void __exit cctrng_mod_exit(void)
{
platform_driver_unregister(&cctrng_driver);
}
module_exit(cctrng_mod_exit);
module_platform_driver(cctrng_driver);
/* Module description */
MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
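Two more recurring conversions meet in this cctrng diff: the hand-rolled cc_trng_clk_init()/cc_trng_clk_fini() pair becomes a single devm_clk_get_optional_enabled() call, whose devres action disables and puts the clock automatically, and the open-coded init/exit functions collapse into module_platform_driver(). A condensed sketch under hypothetical foo names:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            /* acquired, prepared and enabled; undone automatically on unbind */
            clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                         "Failed to get or enable the clock\n");
            return 0;
    }

    static struct platform_driver foo_driver = {
            .probe = foo_probe,
            .driver = { .name = "foo" },
    };

    /* expands to the module_init()/module_exit() register/unregister pair */
    module_platform_driver(foo_driver);

    MODULE_DESCRIPTION("foo driver sketch");
    MODULE_LICENSE("GPL");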


@ -187,10 +187,8 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, rng);
rng->reg_base = pcim_iomap(pdev, 0, 0);
if (!rng->reg_base) {
dev_err(&pdev->dev, "Error while mapping CSRs, exiting\n");
return -ENOMEM;
}
if (!rng->reg_base)
return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"cn10k-rng-%s", dev_name(&pdev->dev));
@ -205,19 +203,12 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
reset_rng_health_state(rng);
err = devm_hwrng_register(&pdev->dev, &rng->ops);
if (err) {
dev_err(&pdev->dev, "Could not register hwrng device.\n");
return err;
}
if (err)
return dev_err_probe(&pdev->dev, err, "Could not register hwrng device.\n");
return 0;
}
static void cn10k_rng_remove(struct pci_dev *pdev)
{
/* Nothing to do */
}
static const struct pci_device_id cn10k_rng_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */
{0,},
@ -229,7 +220,6 @@ static struct pci_driver cn10k_rng_driver = {
.name = "cn10k_rng",
.id_table = cn10k_rng_id_table,
.probe = cn10k_rng_probe,
.remove = cn10k_rng_remove,
};
module_pci_driver(cn10k_rng_driver);
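The cn10k diff also deletes a remove callback whose body was empty: with every resource managed by devm and pcim helpers, .remove can simply be left unset and the driver core skips it. Sketch (a hypothetical PCI driver, ID table omitted):

    #include <linux/module.h>
    #include <linux/pci.h>

    static int foo_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            /* all setup through devm and pcim helpers, so teardown is automatic */
            return 0;
    }

    static struct pci_driver foo_rng_driver = {
            .name = "foo_rng",
            .probe = foo_rng_probe,
            /* no .remove: devres unwinds everything when the device goes away */
            /* .id_table omitted from this sketch */
    };

    module_pci_driver(foo_rng_driver);
    MODULE_LICENSE("GPL");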


@ -15,14 +15,13 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>


@ -185,14 +185,14 @@ static int exynos_trng_remove(struct platform_device *pdev)
return 0;
}
static int __maybe_unused exynos_trng_suspend(struct device *dev)
static int exynos_trng_suspend(struct device *dev)
{
pm_runtime_put_sync(dev);
return 0;
}
static int __maybe_unused exynos_trng_resume(struct device *dev)
static int exynos_trng_resume(struct device *dev)
{
int ret;
@ -205,7 +205,7 @@ static int __maybe_unused exynos_trng_resume(struct device *dev)
return 0;
}
static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
exynos_trng_resume);
static const struct of_device_id exynos_trng_dt_match[] = {
@ -219,7 +219,7 @@ MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
static struct platform_driver exynos_trng_driver = {
.driver = {
.name = "exynos-trng",
.pm = &exynos_trng_pm_ops,
.pm = pm_sleep_ptr(&exynos_trng_pm_ops),
.of_match_table = exynos_trng_dt_match,
},
.probe = exynos_trng_probe,
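The exynos-trng hunks show the newer PM-ops idiom: DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() keep the callbacks visible to the compiler in every configuration (so the __maybe_unused annotations can go) while still dropping them from the image when CONFIG_PM_SLEEP is off. Reduced to a hypothetical driver:

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int foo_suspend(struct device *dev)
    {
            return 0;       /* quiesce the hardware */
    }

    static int foo_resume(struct device *dev)
    {
            return 0;       /* bring it back up */
    }

    /* always compiled; dead-code elimination removes it without PM_SLEEP */
    static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm = pm_sleep_ptr(&foo_pm_ops),    /* NULL when compiled out */
            },
    };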


@ -239,10 +239,8 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
return PTR_ERR(rngc->base);
rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(rngc->clk)) {
dev_err(&pdev->dev, "Can not get rng_clk\n");
return PTR_ERR(rngc->clk);
}
if (IS_ERR(rngc->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@ -272,24 +270,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev,
irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
if (ret) {
dev_err(rngc->dev, "Can't get interrupt working.\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n");
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
dev_err(rngc->dev, "self test failed\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret, "self test failed\n");
}
ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret) {
dev_err(&pdev->dev, "hwrng registration failed\n");
return ret;
}
if (ret)
return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n");
dev_info(&pdev->dev,
"Freescale RNG%c registered (HW revision %d.%02d)\n",


@ -95,7 +95,7 @@ static int ingenic_rng_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
}
priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev);
priv->version = (enum ingenic_rng_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
priv->rng.name = pdev->name;
priv->rng.init = ingenic_rng_init;


@ -11,8 +11,8 @@
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@ -22,8 +22,6 @@
#define TRNG_REG_STATUS_OFFSET 0x08
/* bits within the CFG register */
#define CFG_RDY_CLR BIT(12)
#define CFG_INT_MASK BIT(11)
#define CFG_GEN_EN BIT(0)
/* bits within the STATUS register */
@ -31,7 +29,6 @@
struct ingenic_trng {
void __iomem *base;
struct clk *clk;
struct hwrng rng;
};
@ -79,6 +76,7 @@ static int ingenic_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait
static int ingenic_trng_probe(struct platform_device *pdev)
{
struct ingenic_trng *trng;
struct clk *clk;
int ret;
trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
@ -86,60 +84,28 @@ static int ingenic_trng_probe(struct platform_device *pdev)
return -ENOMEM;
trng->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(trng->base)) {
pr_err("%s: Failed to map DTRNG registers\n", __func__);
ret = PTR_ERR(trng->base);
return PTR_ERR(trng->base);
}
if (IS_ERR(trng->base))
return dev_err_probe(&pdev->dev, PTR_ERR(trng->base),
"%s: Failed to map DTRNG registers\n", __func__);
trng->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(trng->clk)) {
ret = PTR_ERR(trng->clk);
pr_crit("%s: Cannot get DTRNG clock\n", __func__);
return PTR_ERR(trng->clk);
}
ret = clk_prepare_enable(trng->clk);
if (ret) {
pr_crit("%s: Unable to enable DTRNG clock\n", __func__);
return ret;
}
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return dev_err_probe(&pdev->dev, PTR_ERR(clk),
"%s: Cannot get and enable DTRNG clock\n", __func__);
trng->rng.name = pdev->name;
trng->rng.init = ingenic_trng_init;
trng->rng.cleanup = ingenic_trng_cleanup;
trng->rng.read = ingenic_trng_read;
ret = hwrng_register(&trng->rng);
if (ret) {
dev_err(&pdev->dev, "Failed to register hwrng\n");
goto err_unprepare_clk;
}
ret = devm_hwrng_register(&pdev->dev, &trng->rng);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n");
platform_set_drvdata(pdev, trng);
dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
return 0;
err_unprepare_clk:
clk_disable_unprepare(trng->clk);
return ret;
}
static int ingenic_trng_remove(struct platform_device *pdev)
{
struct ingenic_trng *trng = platform_get_drvdata(pdev);
unsigned int ctrl;
hwrng_unregister(&trng->rng);
ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET);
ctrl &= ~CFG_GEN_EN;
writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET);
clk_disable_unprepare(trng->clk);
return 0;
}
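Worth noting in the ingenic conversion: once the clock comes from devm_clk_get_enabled() there is nothing left for the driver to undo, so the handle no longer needs to live in the state structure and the error labels disappear with it. A reduced sketch with hypothetical names:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/hw_random.h>
    #include <linux/platform_device.h>

    struct foo_trng {
            void __iomem *base;
            struct hwrng rng;
            /* no struct clk * member needed any more */
    };

    static int foo_trng_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            /* local variable only; devres disables and unprepares it on detach */
            clk = devm_clk_get_enabled(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                         "Cannot get and enable clock\n");
            return 0;
    }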
static const struct of_device_id ingenic_trng_of_match[] = {
@ -150,7 +116,6 @@ MODULE_DEVICE_TABLE(of, ingenic_trng_of_match);
static struct platform_driver ingenic_trng_driver = {
.probe = ingenic_trng_probe,
.remove = ingenic_trng_remove,
.driver = {
.name = "ingenic-trng",
.of_match_table = ingenic_trng_of_match,


@ -12,8 +12,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@ -182,6 +181,8 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
}
dev_set_drvdata(dev, priv);
priv->rng.name = "iproc-rng200";
priv->rng.read = iproc_rng200_read;
priv->rng.init = iproc_rng200_init;
@ -199,6 +200,28 @@ static int iproc_rng200_probe(struct platform_device *pdev)
return 0;
}
static int __maybe_unused iproc_rng200_suspend(struct device *dev)
{
struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
iproc_rng200_cleanup(&priv->rng);
return 0;
}
static int __maybe_unused iproc_rng200_resume(struct device *dev)
{
struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
iproc_rng200_init(&priv->rng);
return 0;
}
static const struct dev_pm_ops iproc_rng200_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume)
};
static const struct of_device_id iproc_rng200_of_match[] = {
{ .compatible = "brcm,bcm2711-rng200", },
{ .compatible = "brcm,bcm7211-rng200", },
@ -212,6 +235,7 @@ static struct platform_driver iproc_rng200_driver = {
.driver = {
.name = "iproc-rng200",
.of_match_table = iproc_rng200_of_match,
.pm = &iproc_rng200_pm_ops,
},
.probe = iproc_rng200_probe,
};
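iproc-rng200 gains suspend/resume support using the older guard style: __maybe_unused callbacks wired up through SET_SYSTEM_SLEEP_PM_OPS(), which expands to nothing when CONFIG_PM_SLEEP is off. Functionally this matches the pm_sleep_ptr() form shown earlier; schematically:

    #include <linux/pm.h>

    static int __maybe_unused foo_suspend(struct device *dev)
    {
            return 0;
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
            /* empty without CONFIG_PM_SLEEP, hence __maybe_unused above */
            SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
    };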


@ -13,8 +13,6 @@
#include <linux/clk.h>
#include <linux/err.h>
static struct clk *rng_clk;
static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
void __iomem *base = (void __iomem *)rng->priv;
@ -36,21 +34,17 @@ static struct hwrng nmk_rng = {
static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clk *rng_clk;
void __iomem *base;
int ret;
rng_clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(rng_clk)) {
dev_err(&dev->dev, "could not get rng clock\n");
ret = PTR_ERR(rng_clk);
return ret;
}
clk_prepare_enable(rng_clk);
rng_clk = devm_clk_get_enabled(&dev->dev, NULL);
if (IS_ERR(rng_clk))
return dev_err_probe(&dev->dev, PTR_ERR(rng_clk), "could not get rng clock\n");
ret = amba_request_regions(dev, dev->dev.init_name);
if (ret)
goto out_clk;
return ret;
ret = -ENOMEM;
base = devm_ioremap(&dev->dev, dev->res.start,
resource_size(&dev->res));
@ -64,15 +58,12 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
out_release:
amba_release_regions(dev);
out_clk:
clk_disable_unprepare(rng_clk);
return ret;
}
static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
clk_disable_unprepare(rng_clk);
}
static const struct amba_id nmk_rng_ids[] = {


@ -8,12 +8,11 @@
#include <linux/init.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
#define NPCM_RNGCS_REG 0x00 /* Control and status register */
#define NPCM_RNGD_REG 0x04 /* Data register */


@ -26,8 +26,6 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/io.h>


@ -20,7 +20,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>


@ -9,11 +9,10 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#define SDCRNG_CTL_REG 0x00


@ -12,31 +12,22 @@
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define RNGCON 0x04
#define TRNGEN BIT(8)
#define PRNGEN BIT(9)
#define PRNGCONT BIT(10)
#define TRNGMOD BIT(11)
#define SEEDLOAD BIT(12)
#define RNGPOLY1 0x08
#define RNGPOLY2 0x0C
#define RNGNUMGEN1 0x10
#define RNGNUMGEN2 0x14
#define TRNGEN BIT(8)
#define TRNGMOD BIT(11)
#define RNGSEED1 0x18
#define RNGSEED2 0x1C
#define RNGRCNT 0x20
#define RCNT_MASK 0x7F
struct pic32_rng {
void __iomem *base;
struct hwrng rng;
struct clk *clk;
};
/*
@ -46,6 +37,15 @@ struct pic32_rng {
*/
#define RNG_TIMEOUT 500
static int pic32_rng_init(struct hwrng *rng)
{
struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
/* enable TRNG in enhanced mode */
writel(TRNGEN | TRNGMOD, priv->base + RNGCON);
return 0;
}
static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
@ -67,11 +67,17 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
return -EIO;
}
static void pic32_rng_cleanup(struct hwrng *rng)
{
struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
writel(0, priv->base + RNGCON);
}
static int pic32_rng_probe(struct platform_device *pdev)
{
struct pic32_rng *priv;
u32 v;
int ret;
struct clk *clk;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@ -81,41 +87,16 @@ static int pic32_rng_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
ret = clk_prepare_enable(priv->clk);
if (ret)
return ret;
/* enable TRNG in enhanced mode */
v = TRNGEN | TRNGMOD;
writel(v, priv->base + RNGCON);
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
priv->rng.name = pdev->name;
priv->rng.init = pic32_rng_init;
priv->rng.read = pic32_rng_read;
priv->rng.cleanup = pic32_rng_cleanup;
ret = devm_hwrng_register(&pdev->dev, &priv->rng);
if (ret)
goto err_register;
platform_set_drvdata(pdev, priv);
return 0;
err_register:
clk_disable_unprepare(priv->clk);
return ret;
}
static int pic32_rng_remove(struct platform_device *pdev)
{
struct pic32_rng *rng = platform_get_drvdata(pdev);
writel(0, rng->base + RNGCON);
clk_disable_unprepare(rng->clk);
return 0;
return devm_hwrng_register(&pdev->dev, &priv->rng);
}
static const struct of_device_id pic32_rng_of_match[] __maybe_unused = {
@ -126,10 +107,9 @@ MODULE_DEVICE_TABLE(of, pic32_rng_of_match);
static struct platform_driver pic32_rng_driver = {
.probe = pic32_rng_probe,
.remove = pic32_rng_remove,
.driver = {
.name = "pic32-rng",
.of_match_table = of_match_ptr(pic32_rng_of_match),
.of_match_table = pic32_rng_of_match,
},
};
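The pic32 rework moves hardware enable/disable out of probe/remove and into the hwrng .init and .cleanup hooks, which the hwrng core calls around actual use of the generator; together with devm registration that removes the need for a platform .remove callback at all. Sketch of the callback wiring (register names are made up):

    #include <linux/bits.h>
    #include <linux/hw_random.h>
    #include <linux/io.h>
    #include <linux/kernel.h>

    #define FOO_CTRL        0x04
    #define FOO_ENABLE      BIT(0)

    struct foo_rng {
            void __iomem *base;
            struct hwrng rng;
    };

    static int foo_rng_init(struct hwrng *rng)
    {
            struct foo_rng *priv = container_of(rng, struct foo_rng, rng);

            writel(FOO_ENABLE, priv->base + FOO_CTRL);      /* switch the TRNG on */
            return 0;
    }

    static void foo_rng_cleanup(struct hwrng *rng)
    {
            struct foo_rng *priv = container_of(rng, struct foo_rng, rng);

            writel(0, priv->base + FOO_CTRL);               /* and off again */
    }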


@ -10,8 +10,9 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>


@ -113,16 +113,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
return -EINVAL;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENXIO;
if (res->start % 4 != 0 || resource_size(res) < 4) {
dev_err(&pdev->dev,
"address must be at least four bytes wide and 32-bit aligned\n");
return -EINVAL;
}
/* Allocate memory for the device structure (and zero it) */
priv = devm_kzalloc(&pdev->dev,
sizeof(struct timeriomem_rng_private), GFP_KERNEL);
@ -131,6 +121,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
priv->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
if (res->start % 4 != 0 || resource_size(res) < 4) {
dev_err(&pdev->dev,
"address must be at least four bytes wide and 32-bit aligned\n");
return -EINVAL;
}
if (pdev->dev.of_node) {
int i;
@ -158,11 +158,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->rng_ops.name = dev_name(&pdev->dev);
priv->rng_ops.read = timeriomem_rng_read;
priv->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->io_base)) {
return PTR_ERR(priv->io_base);
}
/* Assume random data is already available. */
priv->present = 1;
complete(&priv->completion);
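The timeriomem change replaces the platform_get_resource() plus devm_ioremap_resource() pair with devm_platform_get_and_ioremap_resource(), which maps the region and still hands the resource back, so the alignment check can run after the single call. Roughly:

    #include <linux/err.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            /* looks up MEM resource 0, ioremaps it, and returns it via @res */
            base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            if (res->start % 4 != 0 || resource_size(res) < 4)
                    return dev_err_probe(&pdev->dev, -EINVAL,
                                         "need a 32-bit aligned region of at least 4 bytes\n");
            return 0;
    }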


@ -14,10 +14,10 @@
#include <linux/hw_random.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#define RNG_MAX_DATUM 4


@ -7,7 +7,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/delay.h>


@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>


@ -29,7 +29,7 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
struct sun8i_ce_alg_template *algt;
unsigned int todo, len;
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
@ -92,13 +92,18 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ce_alg_template *algt;
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
algt->stat_fb++;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ce_alg_template *algt __maybe_unused;
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.skcipher.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt->stat_fb++;
#endif
}
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
@ -133,7 +138,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
int ns = sg_nents_for_len(areq->src, areq->cryptlen);
int nd = sg_nents_for_len(areq->dst, areq->cryptlen);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@ -294,7 +299,7 @@ theend:
return err;
}
static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
@ -308,10 +313,10 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
local_bh_disable();
crypto_finalize_skcipher_request(engine, breq, err);
local_bh_enable();
return 0;
}
static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
void *async_req)
{
struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@ -353,7 +358,17 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
int err = sun8i_ce_cipher_prepare(engine, areq);
if (err)
return err;
sun8i_ce_cipher_run(engine, areq);
sun8i_ce_cipher_unprepare(engine, areq);
return 0;
}
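This is the crux of the crypto_engine rework in this series: the engine no longer has separate prepare_request and unprepare_request callbacks, so drivers chain those stages themselves inside a single do_one_request handler, exactly as sun8i_ce_cipher_do_one() does above. The shape in the abstract, with foo_* standing in for a driver's own helpers:

    #include <crypto/engine.h>

    static int foo_prepare(struct crypto_engine *engine, void *areq)
    {
            return 0;       /* map buffers, program keys */
    }

    static void foo_run(struct crypto_engine *engine, void *areq)
    {
            /* kick the hardware and wait for completion */
    }

    static void foo_unprepare(struct crypto_engine *engine, void *areq)
    {
            /* unmap buffers and clean up */
    }

    /* formerly three engine callbacks; now one entry point drives all stages */
    static int foo_do_one_request(struct crypto_engine *engine, void *areq)
    {
            int err = foo_prepare(engine, areq);

            if (err)
                    return err;

            foo_run(engine, areq);
            foo_unprepare(engine, areq);
            return 0;
    }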
@ -406,7 +421,7 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
op->ce = algt->ce;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@ -423,10 +438,6 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
CRYPTO_MAX_ALG_NAME);
op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;


@ -9,21 +9,24 @@
*
* You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <crypto/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
@ -277,7 +280,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_AES,
.ce_blockmode = CE_ID_OP_CBC,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-sun8i-ce",
@ -298,13 +301,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_aes_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sun8i_ce_cipher_do_one,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_AES,
.ce_blockmode = CE_ID_OP_ECB,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun8i-ce",
@ -324,13 +330,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_aes_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sun8i_ce_cipher_do_one,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_DES3,
.ce_blockmode = CE_ID_OP_CBC,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-sun8i-ce",
@ -351,13 +360,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_des3_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sun8i_ce_cipher_do_one,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ce_algo_id = CE_ID_CIPHER_DES3,
.ce_blockmode = CE_ID_OP_ECB,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun8i-ce",
@ -377,12 +389,15 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.setkey = sun8i_ce_des3_setkey,
.encrypt = sun8i_ce_skencrypt,
.decrypt = sun8i_ce_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sun8i_ce_cipher_do_one,
},
},
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_MD5,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@ -390,6 +405,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
.init_tfm = sun8i_ce_hash_init_tfm,
.exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
@ -404,15 +421,17 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ce_hash_crainit,
.cra_exit = sun8i_ce_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ce_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA1,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@ -420,6 +439,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
.init_tfm = sun8i_ce_hash_init_tfm,
.exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
@ -434,15 +455,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ce_hash_crainit,
.cra_exit = sun8i_ce_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ce_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA224,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@ -450,6 +472,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
.init_tfm = sun8i_ce_hash_init_tfm,
.exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@ -464,15 +488,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ce_hash_crainit,
.cra_exit = sun8i_ce_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ce_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA256,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@ -480,6 +505,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
.init_tfm = sun8i_ce_hash_init_tfm,
.exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@ -494,15 +521,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ce_hash_crainit,
.cra_exit = sun8i_ce_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ce_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA384,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@ -510,6 +538,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
.init_tfm = sun8i_ce_hash_init_tfm,
.exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
@ -524,15 +554,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ce_hash_crainit,
.cra_exit = sun8i_ce_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ce_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ce_algo_id = CE_ID_HASH_SHA512,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ce_hash_init,
.update = sun8i_ce_hash_update,
.final = sun8i_ce_hash_final,
@ -540,6 +571,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.digest = sun8i_ce_hash_digest,
.export = sun8i_ce_hash_export,
.import = sun8i_ce_hash_import,
.init_tfm = sun8i_ce_hash_init_tfm,
.exit_tfm = sun8i_ce_hash_exit_tfm,
.halg = {
.digestsize = SHA512_DIGEST_SIZE,
.statesize = sizeof(struct sha512_state),
@ -554,11 +587,12 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ce_hash_crainit,
.cra_exit = sun8i_ce_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ce_hash_run,
},
},
#endif
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG
@ -582,14 +616,18 @@ static struct sun8i_ce_alg_template ce_algs[] = {
#endif
};
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
{
struct sun8i_ce_dev *ce = seq->private;
struct sun8i_ce_dev *ce __maybe_unused = seq->private;
unsigned int i;
for (i = 0; i < MAXFLOW; i++)
seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
seq_printf(seq, "Channel %d: nreq %lu\n", i,
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
ce->chanlist[i].stat_req);
#else
0ul);
#endif
for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
if (!ce_algs[i].ce)
@ -597,8 +635,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ce_algs[i].alg.skcipher.base.cra_driver_name,
ce_algs[i].alg.skcipher.base.cra_name,
ce_algs[i].alg.skcipher.base.base.cra_driver_name,
ce_algs[i].alg.skcipher.base.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ce_algs[i].fbname);
@ -621,8 +659,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
break;
case CRYPTO_ALG_TYPE_AHASH:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ce_algs[i].alg.hash.halg.base.cra_driver_name,
ce_algs[i].alg.hash.halg.base.cra_name,
ce_algs[i].alg.hash.base.halg.base.cra_driver_name,
ce_algs[i].alg.hash.base.halg.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ce_algs[i].fbname);
@ -643,7 +681,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
break;
}
}
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
#if defined(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) && \
defined(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)
seq_printf(seq, "HWRNG %lu %lu\n",
ce->hwrng_stat_req, ce->hwrng_stat_bytes);
#endif
@ -651,7 +690,6 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs);
#endif
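The registration side of the same API move: the do_one_request pointer now lives in the algorithm object rather than in each transform context, so an algorithm is declared as a struct skcipher_engine_alg (the usual skcipher_alg in .base plus a struct crypto_engine_op in .op) and registered with crypto_engine_register_skcipher(). A skeleton with hypothetical foo callbacks:

    #include <crypto/aes.h>
    #include <crypto/engine.h>
    #include <crypto/internal/skcipher.h>
    #include <linux/module.h>

    static int foo_do_one_request(struct crypto_engine *engine, void *areq)
    {
            return 0;       /* process one queued request */
    }

    static struct skcipher_engine_alg foo_alg = {
            .base = {
                    .base = {
                            .cra_name               = "cbc(aes)",
                            .cra_driver_name        = "cbc-aes-foo",
                            .cra_priority           = 400,
                            .cra_blocksize          = AES_BLOCK_SIZE,
                            .cra_module             = THIS_MODULE,
                    },
                    /* .setkey/.encrypt/.decrypt are set as before */
            },
            .op = {
                    .do_one_request = foo_do_one_request,
            },
    };

    static int foo_register(void)
    {
            /* engine-aware counterpart of crypto_register_skcipher() */
            return crypto_engine_register_skcipher(&foo_alg);
    }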
static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
{
@ -839,7 +877,7 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
if (ce_method == CE_ID_NOTSUPP) {
dev_dbg(ce->dev,
"DEBUG: Algo of %s not supported\n",
ce_algs[i].alg.skcipher.base.cra_name);
ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
@ -847,16 +885,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
ce_method = ce->variant->op_mode[id];
if (ce_method == CE_ID_NOTSUPP) {
dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
ce_algs[i].alg.skcipher.base.cra_name);
ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
dev_info(ce->dev, "Register %s\n",
ce_algs[i].alg.skcipher.base.cra_name);
err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
ce_algs[i].alg.skcipher.base.base.cra_name);
err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
if (err) {
dev_err(ce->dev, "ERROR: Fail to register %s\n",
ce_algs[i].alg.skcipher.base.cra_name);
ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@ -867,16 +905,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
if (ce_method == CE_ID_NOTSUPP) {
dev_info(ce->dev,
"DEBUG: Algo of %s not supported\n",
ce_algs[i].alg.hash.halg.base.cra_name);
ce_algs[i].alg.hash.base.halg.base.cra_name);
ce_algs[i].ce = NULL;
break;
}
dev_info(ce->dev, "Register %s\n",
ce_algs[i].alg.hash.halg.base.cra_name);
err = crypto_register_ahash(&ce_algs[i].alg.hash);
ce_algs[i].alg.hash.base.halg.base.cra_name);
err = crypto_engine_register_ahash(&ce_algs[i].alg.hash);
if (err) {
dev_err(ce->dev, "ERROR: Fail to register %s\n",
ce_algs[i].alg.hash.halg.base.cra_name);
ce_algs[i].alg.hash.base.halg.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@ -916,13 +954,13 @@ static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "Unregister %d %s\n", i,
ce_algs[i].alg.skcipher.base.cra_name);
crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
ce_algs[i].alg.skcipher.base.base.cra_name);
crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_AHASH:
dev_info(ce->dev, "Unregister %d %s\n", i,
ce_algs[i].alg.hash.halg.base.cra_name);
crypto_unregister_ahash(&ce_algs[i].alg.hash);
ce_algs[i].alg.hash.base.halg.base.cra_name);
crypto_engine_unregister_ahash(&ce_algs[i].alg.hash);
break;
case CRYPTO_ALG_TYPE_RNG:
dev_info(ce->dev, "Unregister %d %s\n", i,
@ -1007,13 +1045,21 @@ static int sun8i_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct dentry *dbgfs_dir __maybe_unused;
struct dentry *dbgfs_stats __maybe_unused;
/* Ignore error of debugfs */
dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
dbgfs_stats = debugfs_create_file("stats", 0444,
dbgfs_dir, ce,
&sun8i_ce_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
/* Ignore error of debugfs */
ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
ce->dbgfs_stats = debugfs_create_file("stats", 0444,
ce->dbgfs_dir, ce,
&sun8i_ce_debugfs_fops);
ce->dbgfs_dir = dbgfs_dir;
ce->dbgfs_stats = dbgfs_stats;
#endif
}
return 0;
error_alg:


@ -9,48 +9,46 @@
*
* You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sun8i-ce.h"
int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ce_alg_template *algt;
int err;
memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
op->ce = algt->ce;
op->enginectx.op.do_one_request = sun8i_ce_hash_run;
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
/* FALLBACK */
op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(algt->ce->dev, "Fallback driver could no be loaded\n");
return PTR_ERR(op->fallback_tfm);
}
if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
crypto_ahash_set_statesize(tfm,
crypto_ahash_statesize(op->fallback_tfm));
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ce_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
CRYPTO_MAX_ALG_NAME);
err = pm_runtime_get_sync(op->ce->dev);
@ -63,9 +61,9 @@ error_pm:
return err;
}
void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
crypto_free_ahash(tfmctx->fallback_tfm);
pm_runtime_put_sync_suspend(tfmctx->ce->dev);
@ -114,20 +112,22 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ce_alg_template *algt;
#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.result = areq->result;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct sun8i_ce_alg_template *algt __maybe_unused;
struct ahash_alg *alg = crypto_ahash_alg(tfm);
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
algt->stat_fb++;
#endif
}
return crypto_ahash_final(&rctx->fallback_req);
}
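Alongside the engine change, the hash glue drops the legacy cra_init/cra_exit hooks, which took a bare struct crypto_tfm, in favour of the ahash-specific init_tfm/exit_tfm, so context and algorithm are reached through typed helpers instead of casts. Minimal shape with a hypothetical context:

    #include <crypto/internal/hash.h>

    struct foo_hash_ctx {
            struct crypto_ahash *fallback_tfm;
    };

    static int foo_hash_init_tfm(struct crypto_ahash *tfm)
    {
            /* typed lookup instead of __crypto_ahash_alg(tfm->base.__crt_alg) */
            struct foo_hash_ctx *ctx = crypto_ahash_ctx(tfm);

            ctx->fallback_tfm = NULL;       /* a real driver allocates one here */
            return 0;
    }

    static void foo_hash_exit_tfm(struct crypto_ahash *tfm)
    {
            struct foo_hash_ctx *ctx = crypto_ahash_ctx(tfm);

            crypto_free_ahash(ctx->fallback_tfm);
    }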
@ -152,10 +152,6 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ce_alg_template *algt;
#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@ -164,10 +160,17 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct sun8i_ce_alg_template *algt __maybe_unused;
struct ahash_alg *alg = crypto_ahash_alg(tfm);
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
algt->stat_fb++;
#endif
}
return crypto_ahash_finup(&rctx->fallback_req);
}
@ -177,10 +180,6 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ce_alg_template *algt;
#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@ -189,10 +188,17 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct sun8i_ce_alg_template *algt __maybe_unused;
struct ahash_alg *alg = crypto_ahash_alg(tfm);
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
algt->stat_fb++;
#endif
}
return crypto_ahash_digest(&rctx->fallback_req);
}
@ -204,7 +210,7 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
struct sun8i_ce_alg_template *algt;
struct scatterlist *sg;
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
algt->stat_fb_len0++;
@ -253,7 +259,7 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
return sun8i_ce_hash_digest_fb(areq);
}
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
e = sun8i_ce_get_engine_number(ce);
@ -345,11 +351,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
dma_addr_t addr_res, addr_pad;
int ns = sg_nents_for_len(areq->src, areq->nbytes);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
bs = algt->alg.hash.halg.base.cra_blocksize;
digestsize = algt->alg.hash.halg.digestsize;
bs = algt->alg.hash.base.halg.base.cra_blocksize;
digestsize = algt->alg.hash.base.halg.digestsize;
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
if (digestsize == SHA384_DIGEST_SIZE)
@ -454,14 +460,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
chan->timeout = areq->nbytes;
err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
theend:
kfree(buf);
kfree(result);


@ -265,14 +265,12 @@ struct sun8i_cipher_req_ctx {
/*
* struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
* @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
*/
struct sun8i_cipher_tfm_ctx {
struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sun8i_ce_dev *ce;
@ -281,12 +279,10 @@ struct sun8i_cipher_tfm_ctx {
/*
* struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM
* @enginectx: crypto_engine used by this TFM
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
*/
struct sun8i_ce_hash_tfm_ctx {
struct crypto_engine_ctx enginectx;
struct sun8i_ce_dev *ce;
struct crypto_ahash *fallback_tfm;
};
@ -329,8 +325,8 @@ struct sun8i_ce_alg_template {
u32 ce_blockmode;
struct sun8i_ce_dev *ce;
union {
struct skcipher_alg skcipher;
struct ahash_alg hash;
struct skcipher_engine_alg skcipher;
struct ahash_engine_alg hash;
struct rng_alg rng;
} alg;
unsigned long stat_req;
@ -347,14 +343,13 @@ struct sun8i_ce_alg_template {
char fbname[CRYPTO_MAX_ALG_NAME];
};
int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ce_cipher_init(struct crypto_tfm *tfm);
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm);
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq);
int sun8i_ce_skdecrypt(struct skcipher_request *areq);
int sun8i_ce_skencrypt(struct skcipher_request *areq);
@ -362,12 +357,11 @@ int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
int sun8i_ce_hash_crainit(struct crypto_tfm *tfm);
void sun8i_ce_hash_craexit(struct crypto_tfm *tfm);
int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm);
void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm);
int sun8i_ce_hash_init(struct ahash_request *areq);
int sun8i_ce_hash_export(struct ahash_request *areq, void *out);
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in);
int sun8i_ce_hash(struct ahash_request *areq);
int sun8i_ce_hash_final(struct ahash_request *areq);
int sun8i_ce_hash_update(struct ahash_request *areq);
int sun8i_ce_hash_finup(struct ahash_request *areq);


@ -24,7 +24,7 @@ static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
struct scatterlist *in_sg = areq->src;
struct scatterlist *out_sg = areq->dst;
struct scatterlist *sg;
@ -93,13 +93,18 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ss_alg_template *algt __maybe_unused;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
algt->stat_fb++;
algt = container_of(alg, struct sun8i_ss_alg_template,
alg.skcipher.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
algt->stat_fb++;
#endif
}
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
areq->base.complete, areq->base.data);
@ -193,7 +198,7 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen);
int i;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@ -324,7 +329,7 @@ theend:
return err;
}
static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@ -390,7 +395,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
op->ss = algt->ss;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@ -408,10 +413,6 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
CRYPTO_MAX_ALG_NAME);
op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
err = pm_runtime_resume_and_get(op->ss->dev);
if (err < 0) {
dev_err(op->ss->dev, "pm error %d\n", err);


@ -9,22 +9,23 @@
*
* You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <crypto/engine.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ss.h"
@ -168,7 +169,7 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_AES,
.ss_blockmode = SS_ID_OP_CBC,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-sun8i-ss",
@ -189,13 +190,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_aes_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sun8i_ss_handle_cipher_request,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_AES,
.ss_blockmode = SS_ID_OP_ECB,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sun8i-ss",
@ -215,13 +219,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_aes_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sun8i_ss_handle_cipher_request,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_DES3,
.ss_blockmode = SS_ID_OP_CBC,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3-sun8i-ss",
@ -242,13 +249,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_des3_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sun8i_ss_handle_cipher_request,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.ss_algo_id = SS_ID_CIPHER_DES3,
.ss_blockmode = SS_ID_OP_ECB,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3-sun8i-ss",
@ -268,7 +278,10 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.setkey = sun8i_ss_des3_setkey,
.encrypt = sun8i_ss_skencrypt,
.decrypt = sun8i_ss_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sun8i_ss_handle_cipher_request,
},
},
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG
{
@ -292,7 +305,7 @@ static struct sun8i_ss_alg_template ss_algs[] = {
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_MD5,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@ -300,6 +313,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
.init_tfm = sun8i_ss_hash_init_tfm,
.exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
.statesize = sizeof(struct md5_state),
@ -314,15 +329,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ss_hash_crainit,
.cra_exit = sun8i_ss_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ss_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA1,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@ -330,6 +346,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
.init_tfm = sun8i_ss_hash_init_tfm,
.exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
.statesize = sizeof(struct sha1_state),
@ -344,15 +362,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ss_hash_crainit,
.cra_exit = sun8i_ss_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ss_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA224,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@ -360,6 +379,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
.init_tfm = sun8i_ss_hash_init_tfm,
.exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@ -374,15 +395,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ss_hash_crainit,
.cra_exit = sun8i_ss_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ss_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA256,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@ -390,6 +412,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
.init_tfm = sun8i_ss_hash_init_tfm,
.exit_tfm = sun8i_ss_hash_exit_tfm,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct sha256_state),
@ -404,15 +428,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ss_hash_crainit,
.cra_exit = sun8i_ss_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ss_hash_run,
},
},
{ .type = CRYPTO_ALG_TYPE_AHASH,
.ss_algo_id = SS_ID_HASH_SHA1,
.alg.hash = {
.alg.hash.base = {
.init = sun8i_ss_hash_init,
.update = sun8i_ss_hash_update,
.final = sun8i_ss_hash_final,
@ -420,6 +445,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.digest = sun8i_ss_hash_digest,
.export = sun8i_ss_hash_export,
.import = sun8i_ss_hash_import,
.init_tfm = sun8i_ss_hash_init_tfm,
.exit_tfm = sun8i_ss_hash_exit_tfm,
.setkey = sun8i_ss_hmac_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
@ -435,23 +462,28 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_init = sun8i_ss_hash_crainit,
.cra_exit = sun8i_ss_hash_craexit,
}
}
}
},
.alg.hash.op = {
.do_one_request = sun8i_ss_hash_run,
},
},
#endif
};
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
{
struct sun8i_ss_dev *ss = seq->private;
struct sun8i_ss_dev *ss __maybe_unused = seq->private;
unsigned int i;
for (i = 0; i < MAXFLOW; i++)
seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
seq_printf(seq, "Channel %d: nreq %lu\n", i,
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
ss->flows[i].stat_req);
#else
0ul);
#endif
for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
if (!ss_algs[i].ss)
@ -459,8 +491,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
switch (ss_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ss_algs[i].alg.skcipher.base.cra_driver_name,
ss_algs[i].alg.skcipher.base.cra_name,
ss_algs[i].alg.skcipher.base.base.cra_driver_name,
ss_algs[i].alg.skcipher.base.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
@ -482,8 +514,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
break;
case CRYPTO_ALG_TYPE_AHASH:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ss_algs[i].alg.hash.halg.base.cra_driver_name,
ss_algs[i].alg.hash.halg.base.cra_name,
ss_algs[i].alg.hash.base.halg.base.cra_driver_name,
ss_algs[i].alg.hash.base.halg.base.cra_name,
ss_algs[i].stat_req, ss_algs[i].stat_fb);
seq_printf(seq, "\tLast fallback is: %s\n",
ss_algs[i].fbname);
@ -502,7 +534,6 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs);
#endif
static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
{
@ -659,7 +690,7 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev,
"DEBUG: Algo of %s not supported\n",
ss_algs[i].alg.skcipher.base.cra_name);
ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
@ -667,16 +698,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
ss_method = ss->variant->op_mode[id];
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n",
ss_algs[i].alg.skcipher.base.cra_name);
ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
dev_info(ss->dev, "DEBUG: Register %s\n",
ss_algs[i].alg.skcipher.base.cra_name);
err = crypto_register_skcipher(&ss_algs[i].alg.skcipher);
ss_algs[i].alg.skcipher.base.base.cra_name);
err = crypto_engine_register_skcipher(&ss_algs[i].alg.skcipher);
if (err) {
dev_err(ss->dev, "Fail to register %s\n",
ss_algs[i].alg.skcipher.base.cra_name);
ss_algs[i].alg.skcipher.base.base.cra_name);
ss_algs[i].ss = NULL;
return err;
}
@ -695,16 +726,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
if (ss_method == SS_ID_NOTSUPP) {
dev_info(ss->dev,
"DEBUG: Algo of %s not supported\n",
ss_algs[i].alg.hash.halg.base.cra_name);
ss_algs[i].alg.hash.base.halg.base.cra_name);
ss_algs[i].ss = NULL;
break;
}
dev_info(ss->dev, "Register %s\n",
ss_algs[i].alg.hash.halg.base.cra_name);
err = crypto_register_ahash(&ss_algs[i].alg.hash);
ss_algs[i].alg.hash.base.halg.base.cra_name);
err = crypto_engine_register_ahash(&ss_algs[i].alg.hash);
if (err) {
dev_err(ss->dev, "ERROR: Fail to register %s\n",
ss_algs[i].alg.hash.halg.base.cra_name);
ss_algs[i].alg.hash.base.halg.base.cra_name);
ss_algs[i].ss = NULL;
return err;
}
@ -727,8 +758,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
switch (ss_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ss->dev, "Unregister %d %s\n", i,
ss_algs[i].alg.skcipher.base.cra_name);
crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
ss_algs[i].alg.skcipher.base.base.cra_name);
crypto_engine_unregister_skcipher(&ss_algs[i].alg.skcipher);
break;
case CRYPTO_ALG_TYPE_RNG:
dev_info(ss->dev, "Unregister %d %s\n", i,
@ -737,8 +768,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
break;
case CRYPTO_ALG_TYPE_AHASH:
dev_info(ss->dev, "Unregister %d %s\n", i,
ss_algs[i].alg.hash.halg.base.cra_name);
crypto_unregister_ahash(&ss_algs[i].alg.hash);
ss_algs[i].alg.hash.base.halg.base.cra_name);
crypto_engine_unregister_ahash(&ss_algs[i].alg.hash);
break;
}
}
@ -851,13 +882,21 @@ static int sun8i_ss_probe(struct platform_device *pdev)
pm_runtime_put_sync(ss->dev);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct dentry *dbgfs_dir __maybe_unused;
struct dentry *dbgfs_stats __maybe_unused;
/* Ignore error of debugfs */
dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
dbgfs_stats = debugfs_create_file("stats", 0444,
dbgfs_dir, ss,
&sun8i_ss_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
/* Ignore error of debugfs */
ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
ss->dbgfs_stats = debugfs_create_file("stats", 0444,
ss->dbgfs_dir, ss,
&sun8i_ss_debugfs_fops);
ss->dbgfs_dir = dbgfs_dir;
ss->dbgfs_stats = dbgfs_stats;
#endif
}
return 0;
error_alg:
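This file's hunks show the series' central API change in full: the crypto_engine hooks move out of the per-tfm context (the old struct crypto_engine_ctx, which had to sit first in every driver ctx) and into the algorithm object, and registration switches to the engine-aware helpers. A minimal sketch of the new shape for an ahash; the my_* names are illustrative, not symbols from this driver:

#include <crypto/engine.h>
#include <crypto/internal/hash.h>

static int my_hash_run(struct crypto_engine *engine, void *areq);

static struct ahash_engine_alg my_hash_alg = {
	.base.halg.base.cra_name = "sketch(hash)",
	/* ... the usual ahash_alg fields ... */
	.op.do_one_request	 = my_hash_run,	/* was tfm_ctx->enginectx.op */
};

static int my_register(void)
{
	/* crypto_register_ahash() becomes the engine-aware variant */
	return crypto_engine_register_ahash(&my_hash_alg);
}

static void my_unregister(void)
{
	crypto_engine_unregister_ahash(&my_hash_alg);
}

The same wrappers exist for skcipher and akcipher (struct skcipher_engine_alg, struct akcipher_engine_alg), which is why every container_of() and cra_name access in these hunks grows an extra .base hop.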


@ -9,16 +9,21 @@
*
* You could find the datasheet in Documentation/arch/arm/sunxi.rst
*/
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sun8i-ss.h"
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
@ -60,14 +65,11 @@ int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
unsigned int keylen)
{
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
struct sun8i_ss_alg_template *algt;
int digestsize, i;
int bs = crypto_ahash_blocksize(ahash);
int ret;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
digestsize = algt->alg.hash.halg.digestsize;
digestsize = crypto_ahash_digestsize(ahash);
if (keylen > bs) {
ret = sun8i_ss_hashkey(tfmctx, key, keylen);
@ -107,38 +109,33 @@ err_opad:
return ret;
}
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
int err;
memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
op->ss = algt->ss;
op->enginectx.op.do_one_request = sun8i_ss_hash_run;
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
/* FALLBACK */
op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback_tfm)) {
dev_err(algt->ss->dev, "Fallback driver could no be loaded\n");
return PTR_ERR(op->fallback_tfm);
}
if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
crypto_ahash_set_statesize(tfm,
crypto_ahash_statesize(op->fallback_tfm));
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ss_hash_reqctx) +
crypto_ahash_reqsize(op->fallback_tfm));
memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);
memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
CRYPTO_MAX_ALG_NAME);
err = pm_runtime_get_sync(op->ss->dev);
if (err < 0)
@ -150,9 +147,9 @@ error_pm:
return err;
}
void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm)
{
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
kfree_sensitive(tfmctx->ipad);
kfree_sensitive(tfmctx->opad);
@ -204,20 +201,23 @@ int sun8i_ss_hash_final(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_alg_template *algt;
#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP;
rctx->fallback_req.result = areq->result;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt __maybe_unused;
algt = container_of(alg, struct sun8i_ss_alg_template,
alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
algt->stat_fb++;
algt->stat_fb++;
#endif
}
return crypto_ahash_final(&rctx->fallback_req);
}
@ -242,10 +242,6 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_alg_template *algt;
#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@ -254,10 +250,18 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt __maybe_unused;
algt = container_of(alg, struct sun8i_ss_alg_template,
alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
algt->stat_fb++;
algt->stat_fb++;
#endif
}
return crypto_ahash_finup(&rctx->fallback_req);
}
@ -267,10 +271,6 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_alg_template *algt;
#endif
ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
rctx->fallback_req.base.flags = areq->base.flags &
@ -279,10 +279,18 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
rctx->fallback_req.nbytes = areq->nbytes;
rctx->fallback_req.src = areq->src;
rctx->fallback_req.result = areq->result;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt __maybe_unused;
algt = container_of(alg, struct sun8i_ss_alg_template,
alg.hash.base);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
algt->stat_fb++;
algt->stat_fb++;
#endif
}
return crypto_ahash_digest(&rctx->fallback_req);
}
@ -349,11 +357,11 @@ static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct scatterlist *sg;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
if (areq->nbytes == 0) {
algt->stat_fb_len++;
@ -398,8 +406,8 @@ static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
int sun8i_ss_hash_digest(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct crypto_engine *engine;
@ -408,7 +416,7 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
if (sun8i_ss_hash_need_fallback(areq))
return sun8i_ss_hash_digest_fb(areq);
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
e = sun8i_ss_get_engine_number(ss);
@ -484,8 +492,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
struct ahash_alg *alg = crypto_ahash_alg(tfm);
struct sun8i_ss_alg_template *algt;
struct sun8i_ss_dev *ss;
struct scatterlist *sg;
@ -504,10 +512,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
*/
int hmac = 0;
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
digestsize = algt->alg.hash.halg.digestsize;
digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
digestsize = SHA256_DIGEST_SIZE;
@ -700,7 +708,7 @@ err_dma_result:
}
if (!err)
memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
theend:
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
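Three substitutions recur through the hash code above: crypto_ahash_alg(tfm) replaces digging through tfm->base.__crt_alg, container_of() now lands on alg.hash.base because the template wraps an ahash_engine_alg, and the state/request sizes are pushed through the tfm instead of patching halg.statesize behind the API's back. A condensed init_tfm sketch under those assumptions, with my_* placeholders for the driver's own types:

#include <crypto/internal/hash.h>
#include <linux/err.h>

static int my_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct my_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct my_alg_template *algt =
		container_of(alg, struct my_alg_template, alg.hash.base);

	ctx->dev = algt->dev;	/* driver-private backref, as op->ss = algt->ss above */

	ctx->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	/* advertise the fallback's sizes via the tfm, not by editing halg */
	crypto_ahash_set_statesize(tfm, crypto_ahash_statesize(ctx->fallback_tfm));
	crypto_ahash_set_reqsize(tfm, sizeof(struct my_hash_reqctx) +
				      crypto_ahash_reqsize(ctx->fallback_tfm));
	return 0;
}

In the request paths the digest size likewise comes from crypto_ahash_digestsize(tfm) rather than from algt->alg.hash.halg.digestsize.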


@ -201,16 +201,12 @@ struct sun8i_cipher_req_ctx {
/*
* struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
* @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ss: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
*
* enginectx must be the first element
*/
struct sun8i_cipher_tfm_ctx {
struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sun8i_ss_dev *ss;
@ -229,14 +225,10 @@ struct sun8i_ss_rng_tfm_ctx {
/*
* struct sun8i_ss_hash_tfm_ctx - context for an ahash TFM
* @enginectx: crypto_engine used by this TFM
* @fallback_tfm: pointer to the fallback TFM
* @ss: pointer to the private data of driver handling this TFM
*
* enginectx must be the first element
*/
struct sun8i_ss_hash_tfm_ctx {
struct crypto_engine_ctx enginectx;
struct crypto_ahash *fallback_tfm;
struct sun8i_ss_dev *ss;
u8 *ipad;
@ -279,9 +271,9 @@ struct sun8i_ss_alg_template {
u32 ss_blockmode;
struct sun8i_ss_dev *ss;
union {
struct skcipher_alg skcipher;
struct skcipher_engine_alg skcipher;
struct rng_alg rng;
struct ahash_alg hash;
struct ahash_engine_alg hash;
} alg;
unsigned long stat_req;
unsigned long stat_fb;
@ -293,14 +285,13 @@ struct sun8i_ss_alg_template {
char fbname[CRYPTO_MAX_ALG_NAME];
};
int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
int sun8i_ss_cipher_init(struct crypto_tfm *tfm);
void sun8i_ss_cipher_exit(struct crypto_tfm *tfm);
int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq);
int sun8i_ss_skdecrypt(struct skcipher_request *areq);
int sun8i_ss_skencrypt(struct skcipher_request *areq);
@ -313,8 +304,8 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen
int sun8i_ss_prng_init(struct crypto_tfm *tfm);
void sun8i_ss_prng_exit(struct crypto_tfm *tfm);
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm);
void sun8i_ss_hash_craexit(struct crypto_tfm *tfm);
int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm);
void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm);
int sun8i_ss_hash_init(struct ahash_request *areq);
int sun8i_ss_hash_export(struct ahash_request *areq, void *out);
int sun8i_ss_hash_import(struct ahash_request *areq, const void *in);


@ -65,7 +65,7 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq)
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct meson_alg_template *algt;
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
algt->stat_fb++;
#endif
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@ -101,7 +101,7 @@ static int meson_cipher(struct skcipher_request *areq)
void *backup_iv = NULL, *bkeyiv;
u32 v;
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@ -258,8 +258,7 @@ theend:
return err;
}
static int meson_handle_cipher_request(struct crypto_engine *engine,
void *areq)
int meson_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@ -318,7 +317,7 @@ int meson_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));
algt = container_of(alg, struct meson_alg_template, alg.skcipher);
algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
op->mc = algt->mc;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@ -331,10 +330,6 @@ int meson_cipher_init(struct crypto_tfm *tfm)
sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
crypto_skcipher_reqsize(op->fallback_tfm);
op->enginectx.op.do_one_request = meson_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
return 0;
}


@ -6,17 +6,19 @@
*
* Core file which registers crypto algorithms supported by the hardware.
*/
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <crypto/internal/skcipher.h>
#include <linux/dma-mapping.h>
#include "amlogic-gxl.h"
@ -47,7 +49,7 @@ static struct meson_alg_template mc_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.blockmode = MESON_OPMODE_CBC,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-gxl",
@ -68,12 +70,15 @@ static struct meson_alg_template mc_algs[] = {
.setkey = meson_aes_setkey,
.encrypt = meson_skencrypt,
.decrypt = meson_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = meson_handle_cipher_request,
},
},
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.blockmode = MESON_OPMODE_ECB,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-gxl",
@ -93,33 +98,43 @@ static struct meson_alg_template mc_algs[] = {
.setkey = meson_aes_setkey,
.encrypt = meson_skencrypt,
.decrypt = meson_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = meson_handle_cipher_request,
},
},
};
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
static int meson_debugfs_show(struct seq_file *seq, void *v)
{
struct meson_dev *mc = seq->private;
struct meson_dev *mc __maybe_unused = seq->private;
int i;
for (i = 0; i < MAXFLOW; i++)
seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req);
seq_printf(seq, "Channel %d: nreq %lu\n", i,
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
mc->chanlist[i].stat_req);
#else
0ul);
#endif
for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s %lu %lu\n",
mc_algs[i].alg.skcipher.base.cra_driver_name,
mc_algs[i].alg.skcipher.base.cra_name,
mc_algs[i].alg.skcipher.base.base.cra_driver_name,
mc_algs[i].alg.skcipher.base.base.cra_name,
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
mc_algs[i].stat_req, mc_algs[i].stat_fb);
#else
0ul, 0ul);
#endif
break;
}
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(meson_debugfs);
#endif
static void meson_free_chanlist(struct meson_dev *mc, int i)
{
@ -183,10 +198,10 @@ static int meson_register_algs(struct meson_dev *mc)
mc_algs[i].mc = mc;
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
err = crypto_register_skcipher(&mc_algs[i].alg.skcipher);
err = crypto_engine_register_skcipher(&mc_algs[i].alg.skcipher);
if (err) {
dev_err(mc->dev, "Fail to register %s\n",
mc_algs[i].alg.skcipher.base.cra_name);
mc_algs[i].alg.skcipher.base.base.cra_name);
mc_algs[i].mc = NULL;
return err;
}
@ -206,7 +221,7 @@ static void meson_unregister_algs(struct meson_dev *mc)
continue;
switch (mc_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
crypto_unregister_skcipher(&mc_algs[i].alg.skcipher);
crypto_engine_unregister_skcipher(&mc_algs[i].alg.skcipher);
break;
}
}
@ -264,10 +279,16 @@ static int meson_crypto_probe(struct platform_device *pdev)
if (err)
goto error_alg;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG)) {
struct dentry *dbgfs_dir;
dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
debugfs_create_file("stats", 0444, dbgfs_dir, mc, &meson_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops);
mc->dbgfs_dir = dbgfs_dir;
#endif
}
return 0;
error_alg:
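One quirk of the debugfs rework, here and in the sun8i-ss hunk earlier: the stat fields only exist when the debug Kconfig is enabled, so the show() callback prints either the live counter or 0ul under an inner #ifdef, with __maybe_unused keeping the private-data local from warning in the stats-off build. Condensed sketch, assuming a chanlist[].stat_req counter like the drivers above:

#include <linux/seq_file.h>

static int my_debugfs_show(struct seq_file *seq, void *v)
{
	struct my_dev *mc __maybe_unused = seq->private;
	int i;

	for (i = 0; i < MY_MAXFLOW; i++)
		seq_printf(seq, "Channel %d: nreq %lu\n", i,
#ifdef CONFIG_MY_DRIVER_DEBUG
			   mc->chanlist[i].stat_req);	/* field exists only in debug builds */
#else
			   0ul);
#endif
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(my_debugfs);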


@ -114,7 +114,6 @@ struct meson_cipher_req_ctx {
/*
* struct meson_cipher_tfm_ctx - context for a skcipher TFM
* @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @keymode: The keymode(type and size of key) associated with this TFM
@ -122,7 +121,6 @@ struct meson_cipher_req_ctx {
* @fallback_tfm: pointer to the fallback TFM
*/
struct meson_cipher_tfm_ctx {
struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
u32 keymode;
@ -143,7 +141,7 @@ struct meson_alg_template {
u32 type;
u32 blockmode;
union {
struct skcipher_alg skcipher;
struct skcipher_engine_alg skcipher;
} alg;
struct meson_dev *mc;
#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
@ -160,3 +158,4 @@ int meson_cipher_init(struct crypto_tfm *tfm);
void meson_cipher_exit(struct crypto_tfm *tfm);
int meson_skdecrypt(struct skcipher_request *areq);
int meson_skencrypt(struct skcipher_request *areq);
int meson_handle_cipher_request(struct crypto_engine *engine, void *areq);


@ -2,25 +2,23 @@
/*
* Copyright 2021 Aspeed Technology Inc.
*/
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/count_zeros.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define ACRY_DBG(d, fmt, ...) \
@ -112,7 +110,6 @@ struct aspeed_acry_dev {
};
struct aspeed_acry_ctx {
struct crypto_engine_ctx enginectx;
struct aspeed_acry_dev *acry_dev;
struct rsa_key key;
@ -131,7 +128,7 @@ struct aspeed_acry_ctx {
struct aspeed_acry_alg {
struct aspeed_acry_dev *acry_dev;
struct akcipher_alg akcipher;
struct akcipher_engine_alg akcipher;
};
enum aspeed_rsa_key_mode {
@ -577,7 +574,7 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
const char *name = crypto_tfm_alg_name(&tfm->base);
struct aspeed_acry_alg *acry_alg;
acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher);
acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);
ctx->acry_dev = acry_alg->acry_dev;
@ -589,10 +586,6 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
return PTR_ERR(ctx->fallback_tfm);
}
ctx->enginectx.op.do_one_request = aspeed_acry_do_request;
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@ -605,7 +598,7 @@ static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
{
.akcipher = {
.akcipher.base = {
.encrypt = aspeed_acry_rsa_enc,
.decrypt = aspeed_acry_rsa_dec,
.sign = aspeed_acry_rsa_dec,
@ -627,6 +620,9 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
.cra_ctxsize = sizeof(struct aspeed_acry_ctx),
},
},
.akcipher.op = {
.do_one_request = aspeed_acry_do_request,
},
},
};
@ -636,10 +632,10 @@ static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
if (rc) {
ACRY_DBG(acry_dev, "Failed to register %s\n",
aspeed_acry_akcipher_algs[i].akcipher.base.cra_name);
aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
}
}
}
@ -649,7 +645,7 @@ static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
}
/* ACRY interrupt service routine. */


@ -4,6 +4,17 @@
*/
#include "aspeed-hace.h"
#include <crypto/des.h>
#include <crypto/engine.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG
#define CIPHER_DBG(h, fmt, ...) \
@ -696,7 +707,7 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
struct aspeed_hace_alg *crypto_alg;
crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher.base);
ctx->hace_dev = crypto_alg->hace_dev;
ctx->start = aspeed_hace_skcipher_trigger;
@ -713,10 +724,6 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
crypto_skcipher_reqsize(ctx->fallback_tfm));
ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
ctx->enginectx.op.prepare_request = NULL;
ctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@ -731,7 +738,7 @@ static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
static struct aspeed_hace_alg aspeed_crypto_algs[] = {
{
.alg.skcipher = {
.alg.skcipher.base = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = aspeed_aes_setkey,
@ -751,10 +758,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@ -775,10 +785,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@ -799,10 +812,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@ -823,10 +839,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = aspeed_des_setkey,
@ -846,10 +865,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@ -870,10 +892,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@ -894,10 +919,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@ -918,10 +946,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = aspeed_des_setkey,
@ -941,10 +972,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@ -965,10 +999,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@ -989,10 +1026,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@ -1013,13 +1053,16 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
};
static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = AES_BLOCK_SIZE,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
@ -1039,10 +1082,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
@ -1062,10 +1108,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
{
.alg.skcipher = {
.alg.skcipher.base = {
.ivsize = DES_BLOCK_SIZE,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
@ -1085,7 +1134,10 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
.cra_alignmask = 0x0f,
.cra_module = THIS_MODULE,
}
}
},
.alg.skcipher.op = {
.do_one_request = aspeed_crypto_do_request,
},
},
};
@ -1095,13 +1147,13 @@ void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
if (hace_dev->version != AST2600_VERSION)
return;
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
}
void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
@ -1112,10 +1164,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
aspeed_crypto_algs[i].hace_dev = hace_dev;
rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
rc = crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
if (rc) {
CIPHER_DBG(hace_dev, "Failed to register %s\n",
aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
aspeed_crypto_algs[i].alg.skcipher.base.base.cra_name);
}
}
@ -1124,10 +1176,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
rc = crypto_engine_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
if (rc) {
CIPHER_DBG(hace_dev, "Failed to register %s\n",
aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
aspeed_crypto_algs_g6[i].alg.skcipher.base.base.cra_name);
}
}
}


@ -4,6 +4,17 @@
*/
#include "aspeed-hace.h"
#include <crypto/engine.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/string.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define AHASH_DBG(h, fmt, ...) \
@ -48,28 +59,6 @@ static const __be64 sha512_iv[8] = {
cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
};
static const __be32 sha512_224_iv[16] = {
cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
};
static const __be32 sha512_256_iv[16] = {
cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
};
/* The purpose of this padding is to ensure that the padded message is a
* multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
* The bit "1" is appended at the end of the message followed by
@ -565,8 +554,8 @@ static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
return 0;
}
static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
void *areq)
static void aspeed_ahash_prepare_request(struct crypto_engine *engine,
void *areq)
{
struct ahash_request *req = ahash_request_cast(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@ -581,8 +570,12 @@ static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
else
hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
}
return 0;
static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
{
aspeed_ahash_prepare_request(engine, areq);
return aspeed_ahash_do_request(engine, areq);
}
static int aspeed_sham_update(struct ahash_request *req)
@ -750,62 +743,6 @@ static int aspeed_sham_init(struct ahash_request *req)
return 0;
}
static int aspeed_sha512s_init(struct ahash_request *req)
{
struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
struct aspeed_sha_hmac_ctx *bctx = tctx->base;
AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
rctx->cmd = HASH_CMD_ACC_MODE;
rctx->flags = 0;
switch (crypto_ahash_digestsize(tfm)) {
case SHA224_DIGEST_SIZE:
rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
HASH_CMD_SHA_SWAP;
rctx->flags |= SHA_FLAGS_SHA512_224;
rctx->digsize = SHA224_DIGEST_SIZE;
rctx->block_size = SHA512_BLOCK_SIZE;
rctx->sha_iv = sha512_224_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
break;
case SHA256_DIGEST_SIZE:
rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
HASH_CMD_SHA_SWAP;
rctx->flags |= SHA_FLAGS_SHA512_256;
rctx->digsize = SHA256_DIGEST_SIZE;
rctx->block_size = SHA512_BLOCK_SIZE;
rctx->sha_iv = sha512_256_iv;
rctx->ivsize = 64;
memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
break;
default:
dev_warn(tctx->hace_dev->dev, "digest size %d not support\n",
crypto_ahash_digestsize(tfm));
return -EINVAL;
}
rctx->bufcnt = 0;
rctx->total = 0;
rctx->digcnt[0] = 0;
rctx->digcnt[1] = 0;
/* HMAC init */
if (tctx->flags & SHA_FLAGS_HMAC) {
rctx->digcnt[0] = rctx->block_size;
rctx->bufcnt = rctx->block_size;
memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
rctx->flags |= SHA_FLAGS_HMAC;
}
return 0;
}
static int aspeed_sham_digest(struct ahash_request *req)
{
return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
@ -854,7 +791,7 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
struct aspeed_hace_alg *ast_alg;
ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
tctx->hace_dev = ast_alg->hace_dev;
tctx->flags = 0;
@ -876,10 +813,6 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
}
}
tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
tctx->enginectx.op.unprepare_request = NULL;
return 0;
}
@ -917,7 +850,7 @@ static int aspeed_sham_import(struct ahash_request *req, const void *in)
static struct aspeed_hace_alg aspeed_ahash_algs[] = {
{
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -944,9 +877,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
{
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -973,9 +909,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
{
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -1002,10 +941,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
{
.alg_base = "sha1",
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -1034,10 +976,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
{
.alg_base = "sha224",
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -1066,10 +1011,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
{
.alg_base = "sha256",
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -1098,12 +1046,15 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
}
}
},
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
};
static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
{
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -1130,9 +1081,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
{
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -1159,68 +1113,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
},
{
.alg.ahash = {
.init = aspeed_sha512s_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
.base = {
.cra_name = "sha512_224",
.cra_driver_name = "aspeed-sha512_224",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = aspeed_sham_cra_init,
.cra_exit = aspeed_sham_cra_exit,
}
}
},
},
{
.alg.ahash = {
.init = aspeed_sha512s_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
.base = {
.cra_name = "sha512_256",
.cra_driver_name = "aspeed-sha512_256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = aspeed_sham_cra_init,
.cra_exit = aspeed_sham_cra_exit,
}
}
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
{
.alg_base = "sha384",
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -1249,10 +1148,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
{
.alg_base = "sha512",
.alg.ahash = {
.alg.ahash.base = {
.init = aspeed_sham_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
@ -1281,69 +1183,8 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
}
}
},
},
{
.alg_base = "sha512_224",
.alg.ahash = {
.init = aspeed_sha512s_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.setkey = aspeed_sham_setkey,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
.base = {
.cra_name = "hmac(sha512_224)",
.cra_driver_name = "aspeed-hmac-sha512_224",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
sizeof(struct aspeed_sha_hmac_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = aspeed_sham_cra_init,
.cra_exit = aspeed_sham_cra_exit,
}
}
},
},
{
.alg_base = "sha512_256",
.alg.ahash = {
.init = aspeed_sha512s_init,
.update = aspeed_sham_update,
.final = aspeed_sham_final,
.finup = aspeed_sham_finup,
.digest = aspeed_sham_digest,
.setkey = aspeed_sham_setkey,
.export = aspeed_sham_export,
.import = aspeed_sham_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
.statesize = sizeof(struct aspeed_sham_reqctx),
.base = {
.cra_name = "hmac(sha512_256)",
.cra_driver_name = "aspeed-hmac-sha512_256",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aspeed_sham_ctx) +
sizeof(struct aspeed_sha_hmac_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = aspeed_sham_cra_init,
.cra_exit = aspeed_sham_cra_exit,
}
}
.alg.ahash.op = {
.do_one_request = aspeed_ahash_do_one,
},
},
};
@ -1353,13 +1194,13 @@ void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
int i;
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
if (hace_dev->version != AST2600_VERSION)
return;
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
}
void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
@ -1370,10 +1211,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
aspeed_ahash_algs[i].hace_dev = hace_dev;
rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
if (rc) {
AHASH_DBG(hace_dev, "Failed to register %s\n",
aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name);
}
}
@ -1382,10 +1223,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
if (rc) {
AHASH_DBG(hace_dev, "Failed to register %s\n",
aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name);
}
}
}
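A second engine-API casualty is visible in the hash hunks above: prepare_request/unprepare_request are gone from struct crypto_engine_op, so the old prepare hook becomes a void helper folded into the single remaining entry point. Sketch with illustrative names:

static int my_do_request(struct crypto_engine *engine, void *areq);

static void my_prepare(struct crypto_engine *engine, void *areq)
{
	/* DMA/buffer staging that used to run as enginectx.op.prepare_request */
}

static int my_do_one(struct crypto_engine *engine, void *areq)
{
	my_prepare(engine, areq);
	return my_do_request(engine, areq);	/* wired into .op.do_one_request */
}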


@ -3,7 +3,14 @@
* Copyright (c) 2021 Aspeed Technology Inc.
*/
#include "aspeed-hace.h"
#include <crypto/engine.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@ -11,8 +18,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include "aspeed-hace.h"
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define HACE_DBG(d, fmt, ...) \
dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)


@ -2,25 +2,14 @@
#ifndef __ASPEED_HACE_H__
#define __ASPEED_HACE_H__
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/dma-mapping.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/des.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/bits.h>
#include <linux/compiler_attributes.h>
#include <linux/interrupt.h>
#include <linux/types.h>
/*****************************
* *
@ -144,6 +133,7 @@
HACE_CMD_OFB | HACE_CMD_CTR)
struct aspeed_hace_dev;
struct scatterlist;
typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
@ -178,8 +168,6 @@ struct aspeed_sha_hmac_ctx {
};
struct aspeed_sham_ctx {
struct crypto_engine_ctx enginectx;
struct aspeed_hace_dev *hace_dev;
unsigned long flags; /* hmac flag */
@ -235,8 +223,6 @@ struct aspeed_engine_crypto {
};
struct aspeed_cipher_ctx {
struct crypto_engine_ctx enginectx;
struct aspeed_hace_dev *hace_dev;
int key_len;
u8 key[AES_MAX_KEYLENGTH];
@ -275,8 +261,8 @@ struct aspeed_hace_alg {
const char *alg_base;
union {
struct skcipher_alg skcipher;
struct ahash_alg ahash;
struct skcipher_engine_alg skcipher;
struct ahash_engine_alg ahash;
} alg;
};


@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@ -2533,13 +2533,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-aes" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
#endif
static int atmel_aes_probe(struct platform_device *pdev)
{
@ -2566,11 +2564,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
/* Get the base address */
aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!aes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res);
if (IS_ERR(aes_dd->io_base)) {
err = PTR_ERR(aes_dd->io_base);
goto err_tasklet_kill;
}
aes_dd->phys_base = aes_res->start;
@ -2597,13 +2593,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
if (IS_ERR(aes_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(aes_dd->io_base);
goto err_tasklet_kill;
}
err = clk_prepare(aes_dd->iclk);
if (err)
goto err_tasklet_kill;
@ -2687,7 +2676,7 @@ static struct platform_driver atmel_aes_driver = {
.remove = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.of_match_table = of_match_ptr(atmel_aes_dt_ids),
.of_match_table = atmel_aes_dt_ids,
},
};
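The probe rework above is a stock modernization: platform_get_resource() plus the later devm_ioremap_resource() collapse into a single devm_platform_get_and_ioremap_resource() call, the redundant error prints go (the helper logs on failure), and with of_match_ptr() dropped the match table needs no CONFIG_OF guard. Minimal probe sketch under those assumptions; my_dev and its fields are illustrative:

#include <linux/err.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct my_dev *dd;
	struct resource *res;

	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	/* fetch the MEM resource and ioremap it in one step */
	dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(dd->io_base))
		return PTR_ERR(dd->io_base);
	dd->phys_base = res->start;

	return 0;
}

The sibling atmel-sha and atmel-tdes hunks below apply the same treatment; atmel-sha additionally routes its DMA-channel failure through dev_err_probe(), which records the reason when probing defers instead of printing unconditionally.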


@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>


@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@ -1770,7 +1770,8 @@ static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
size_t bs = ctx->block_size;
size_t i, num_words = bs / sizeof(u32);
memcpy(hmac->opad, hmac->ipad, bs);
unsafe_memcpy(hmac->opad, hmac->ipad, bs,
"fortified memcpy causes -Wrestrict warning");
for (i = 0; i < num_words; ++i) {
hmac->ipad[i] ^= 0x36363636;
hmac->opad[i] ^= 0x5c5c5c5c;
@ -2499,8 +2500,8 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
{
dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
if (IS_ERR(dd->dma_lch_in.chan)) {
dev_err(dd->dev, "DMA channel is not available\n");
return PTR_ERR(dd->dma_lch_in.chan);
return dev_err_probe(dd->dev, PTR_ERR(dd->dma_lch_in.chan),
"DMA channel is not available\n");
}
dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
@ -2570,14 +2571,12 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_sha_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-sha" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
#endif
static int atmel_sha_probe(struct platform_device *pdev)
{
@ -2604,11 +2603,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
/* Get the base address */
sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!sha_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
sha_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &sha_res);
if (IS_ERR(sha_dd->io_base)) {
err = PTR_ERR(sha_dd->io_base);
goto err_tasklet_kill;
}
sha_dd->phys_base = sha_res->start;
@ -2635,13 +2632,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
if (IS_ERR(sha_dd->io_base)) {
dev_err(dev, "can't ioremap\n");
err = PTR_ERR(sha_dd->io_base);
goto err_tasklet_kill;
}
err = clk_prepare(sha_dd->iclk);
if (err)
goto err_tasklet_kill;
@ -2716,7 +2706,7 @@ static struct platform_driver atmel_sha_driver = {
.remove = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.of_match_table = of_match_ptr(atmel_sha_dt_ids),
.of_match_table = atmel_sha_dt_ids,
},
};
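The HMAC ipad/opad hunk above (and the matching bcm one further down) swaps a plain memcpy() for unsafe_memcpy(): ipad and opad are adjacent fixed-size fields in one struct, so a fortified memcpy() warns even though the copy is bounded. The annotation keeps the bounds check off and documents why. Sketch, assuming adjacent pads sized for SHA-512 blocks:

#include <crypto/sha2.h>
#include <linux/string.h>

struct my_hmac_ctx {
	u8 ipad[SHA512_BLOCK_SIZE];	/* adjacent fields are what trip fortify */
	u8 opad[SHA512_BLOCK_SIZE];
};

static void my_copy_pads(struct my_hmac_ctx *ctx, size_t bs)
{
	/* bs is bounded by sizeof(ctx->ipad); the justification string
	 * documents the false positive being silenced */
	unsafe_memcpy(ctx->opad, ctx->ipad, bs,
		      "fortified memcpy causes -Wrestrict warning");
}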


@ -28,7 +28,7 @@
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
@ -1139,13 +1139,11 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
}
}
#if defined(CONFIG_OF)
static const struct of_device_id atmel_tdes_dt_ids[] = {
{ .compatible = "atmel,at91sam9g46-tdes" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
#endif
static int atmel_tdes_probe(struct platform_device *pdev)
{
@ -1172,11 +1170,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
/* Get the base address */
tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!tdes_res) {
dev_err(dev, "no MEM resource info\n");
err = -ENODEV;
tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
err = PTR_ERR(tdes_dd->io_base);
goto err_tasklet_kill;
}
tdes_dd->phys_base = tdes_res->start;
@ -1203,12 +1199,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
goto err_tasklet_kill;
}
tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
if (IS_ERR(tdes_dd->io_base)) {
err = PTR_ERR(tdes_dd->io_base);
goto err_tasklet_kill;
}
err = atmel_tdes_hw_version_init(tdes_dd);
if (err)
goto err_tasklet_kill;
@ -1282,7 +1272,7 @@ static struct platform_driver atmel_tdes_driver = {
.remove = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.of_match_table = of_match_ptr(atmel_tdes_dt_ids),
.of_match_table = atmel_tdes_dt_ids,
},
};


@ -15,8 +15,7 @@
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/bitops.h>
@ -2397,7 +2396,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
memset(ctx->ipad + ctx->authkeylen, 0,
blocksize - ctx->authkeylen);
ctx->authkeylen = 0;
memcpy(ctx->opad, ctx->ipad, blocksize);
unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
"fortified memcpy causes -Wrestrict warning");
for (index = 0; index < blocksize; index++) {
ctx->ipad[index] ^= HMAC_IPAD_VALUE;

File diff suppressed because it is too large


@ -65,9 +65,13 @@
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#define CAAM_CRA_PRIORITY 3000
@ -89,7 +93,6 @@ static struct list_head hash_list;
/* ahash per-session context */
struct caam_hash_ctx {
struct crypto_engine_ctx enginectx;
u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@ -368,10 +371,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
int ret;
desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
if (!desc)
return -ENOMEM;
}
init_job_desc(desc, 0);
@ -702,19 +703,14 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
int sg_num, u32 *sh_desc,
dma_addr_t sh_desc_dma)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
struct caam_hash_state *state = ahash_request_ctx_dma(req);
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
if (!edesc) {
dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
if (!edesc)
return NULL;
}
state->edesc = edesc;
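Two quiet cleanups in this allocator: the failure printk is dropped (here and in caam_hash_alloc() below) since the slab allocator already logs failed allocations, and the open-coded sizeof(*edesc) + sg_num * sizeof(entry) becomes struct_size(), which checks the multiply-and-add for overflow. Sketch with a stand-in for the CAAM descriptor types:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_sg_entry { u64 ptr; u32 len; };	/* stand-in for sec4_sg_entry */

struct my_edesc {
	int src_nents;
	struct my_sg_entry sec4_sg[];		/* flexible-array trailer */
};

static struct my_edesc *my_edesc_alloc(int sg_num, gfp_t flags)
{
	struct my_edesc *edesc;

	/* overflow-checked sizeof(*edesc) + sg_num * sizeof(edesc->sec4_sg[0]) */
	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	return edesc;	/* NULL on failure; no extra error print needed */
}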
@ -1757,7 +1753,7 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
int alg_type;
struct ahash_alg ahash_alg;
struct ahash_engine_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
@ -1769,7 +1765,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
struct ahash_alg *alg =
container_of(halg, struct ahash_alg, halg);
struct caam_hash_alg *caam_hash =
container_of(alg, struct caam_hash_alg, ahash_alg);
container_of(alg, struct caam_hash_alg, ahash_alg.base);
struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
@ -1860,8 +1856,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
sh_desc_digest) -
sh_desc_update_offset;
ctx->enginectx.op.do_one_request = ahash_do_one_req;
crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
/*
@ -1894,7 +1888,7 @@ void caam_algapi_hash_exit(void)
return;
list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
crypto_unregister_ahash(&t_alg->ahash_alg);
crypto_engine_unregister_ahash(&t_alg->ahash_alg);
list_del(&t_alg->entry);
kfree(t_alg);
}
@ -1909,13 +1903,11 @@ caam_hash_alloc(struct caam_hash_template *template,
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
if (!t_alg) {
pr_err("failed to allocate t_alg\n");
if (!t_alg)
return ERR_PTR(-ENOMEM);
}
t_alg->ahash_alg = template->template_ahash;
halg = &t_alg->ahash_alg;
t_alg->ahash_alg.base = template->template_ahash;
halg = &t_alg->ahash_alg.base;
alg = &halg->halg.base;
if (keyed) {
@ -1928,7 +1920,7 @@ caam_hash_alloc(struct caam_hash_template *template,
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
t_alg->ahash_alg.setkey = NULL;
halg->setkey = NULL;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
@ -1940,6 +1932,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
t_alg->alg_type = template->alg_type;
t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;
return t_alg;
}
@ -2001,10 +1994,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
err = crypto_engine_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
t_alg->ahash_alg.halg.base.cra_driver_name,
t_alg->ahash_alg.base.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
@ -2021,10 +2014,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
continue;
}
err = crypto_register_ahash(&t_alg->ahash_alg);
err = crypto_engine_register_ahash(&t_alg->ahash_alg);
if (err) {
pr_warn("%s alg registration failed: %d\n",
t_alg->ahash_alg.halg.base.cra_driver_name,
t_alg->ahash_alg.base.halg.base.cra_driver_name,
err);
kfree(t_alg);
} else
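
The caamhash conversion above is the pattern repeated across most of this merge: the do_one_request callback moves out of the per-tfm struct crypto_engine_ctx and into the algorithm object, and registration goes through the crypto_engine_register_*() helpers. A minimal sketch of the new shape, assuming a made-up "demo" driver and leaving out the hash callbacks and sizes a real ahash needs::

  #include <crypto/engine.h>
  #include <crypto/internal/engine.h>
  #include <crypto/internal/hash.h>

  /* Hypothetical handler; a real driver programs its hardware here. */
  static int demo_hash_do_one_req(struct crypto_engine *engine, void *areq)
  {
          struct ahash_request *req = ahash_request_cast(areq);

          /* ... kick off DMA, then complete (possibly asynchronously) ... */
          crypto_finalize_hash_request(engine, req, 0);
          return 0;
  }

  static struct ahash_engine_alg demo_hash_alg = {
          .base.halg.base = {
                  .cra_name        = "sha256",
                  .cra_driver_name = "sha256-demo",  /* placeholder */
                  .cra_module      = THIS_MODULE,
          },
          /* The callback now lives in the algorithm, not the tfm ctx. */
          .op.do_one_request = demo_hash_do_one_req,
  };

  static int __init demo_hash_init(void)
  {
          return crypto_engine_register_ahash(&demo_hash_alg);
  }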


@ -16,8 +16,12 @@
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
#include <crypto/internal/engine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
@ -38,7 +42,7 @@ static u8 *zero_buffer;
static bool init_done;
struct caam_akcipher_alg {
struct akcipher_alg akcipher;
struct akcipher_engine_alg akcipher;
bool registered;
};
@ -225,7 +229,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
if (len && *buff)
break;
sg_miter_next(&miter);
if (!sg_miter_next(&miter))
break;
buff = miter.addr;
len = miter.length;
@ -1121,8 +1127,6 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
return -ENOMEM;
}
ctx->enginectx.op.do_one_request = akcipher_do_one_req;
return 0;
}
@ -1139,7 +1143,7 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
}
static struct caam_akcipher_alg caam_rsa = {
.akcipher = {
.akcipher.base = {
.encrypt = caam_rsa_enc,
.decrypt = caam_rsa_dec,
.set_pub_key = caam_rsa_set_pub_key,
@ -1155,7 +1159,10 @@ static struct caam_akcipher_alg caam_rsa = {
.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
CRYPTO_DMA_PADDING,
},
}
},
.akcipher.op = {
.do_one_request = akcipher_do_one_req,
},
};
/* Public Key Cryptography module initialization handler */
@ -1193,12 +1200,12 @@ int caam_pkc_init(struct device *ctrldev)
if (!zero_buffer)
return -ENOMEM;
err = crypto_register_akcipher(&caam_rsa.akcipher);
err = crypto_engine_register_akcipher(&caam_rsa.akcipher);
if (err) {
kfree(zero_buffer);
dev_warn(ctrldev, "%s alg registration failed\n",
caam_rsa.akcipher.base.cra_driver_name);
caam_rsa.akcipher.base.base.cra_driver_name);
} else {
init_done = true;
caam_rsa.registered = true;
@ -1214,7 +1221,7 @@ void caam_pkc_exit(void)
return;
if (caam_rsa.registered)
crypto_unregister_akcipher(&caam_rsa.akcipher);
crypto_engine_unregister_akcipher(&caam_rsa.akcipher);
kfree(zero_buffer);
}
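
The sg_miter_next() change in caam_rsa_count_leading_zeros() above fixes an unchecked iterator step: the old code advanced the mapping iterator without looking at the return value, so it could keep dereferencing a stale buffer after the scatterlist ran out. The safe shape gates every step on sg_miter_next(); a standalone sketch with illustrative names::

  #include <linux/scatterlist.h>

  /* Count leading zero bytes in a scatterlist, stopping at its end. */
  static size_t demo_count_leading_zeros(struct scatterlist *sgl,
                                         unsigned int nents)
  {
          struct sg_mapping_iter miter;
          size_t zeros = 0;

          sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
          while (sg_miter_next(&miter)) {  /* false: no entries left */
                  const u8 *buf = miter.addr;
                  size_t i;

                  for (i = 0; i < miter.length; i++) {
                          if (buf[i])
                                  goto out;
                          zeros++;
                  }
          }
  out:
          sg_miter_stop(&miter);
          return zeros;
  }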


@ -12,7 +12,6 @@
#define _PKC_DESC_H_
#include "compat.h"
#include "pdb.h"
#include <crypto/engine.h>
/**
* caam_priv_key_form - CAAM RSA private key representation
@ -88,13 +87,11 @@ struct caam_rsa_key {
/**
* caam_rsa_ctx - per session context.
* @enginectx : crypto engine context
* @key : RSA key in DMA zone
* @dev : device structure
* @padding_dma : dma address of padding, for adding it to the input
*/
struct caam_rsa_ctx {
struct crypto_engine_ctx enginectx;
struct caam_rsa_key key;
struct device *dev;
dma_addr_t padding_dma;


@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
@ -740,6 +741,109 @@ static int caam_ctrl_rng_init(struct device *dev)
return 0;
}
/* Indicate if the internal state of the CAAM is lost during PM */
static int caam_off_during_pm(void)
{
bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") ||
of_machine_is_compatible("fsl,imx6qp") ||
of_machine_is_compatible("fsl,imx6dl");
return not_off_during_pm ? 0 : 1;
}
static void caam_state_save(struct device *dev)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
struct caam_ctl_state *state = &ctrlpriv->state;
struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
u32 deco_inst, jr_inst;
int i;
state->mcr = rd_reg32(&ctrl->mcr);
state->scfgr = rd_reg32(&ctrl->scfgr);
deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
for (i = 0; i < deco_inst; i++) {
state->deco_mid[i].liodn_ms =
rd_reg32(&ctrl->deco_mid[i].liodn_ms);
state->deco_mid[i].liodn_ls =
rd_reg32(&ctrl->deco_mid[i].liodn_ls);
}
jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
for (i = 0; i < jr_inst; i++) {
state->jr_mid[i].liodn_ms =
rd_reg32(&ctrl->jr_mid[i].liodn_ms);
state->jr_mid[i].liodn_ls =
rd_reg32(&ctrl->jr_mid[i].liodn_ls);
}
}
static void caam_state_restore(const struct device *dev)
{
const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
const struct caam_ctl_state *state = &ctrlpriv->state;
struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
u32 deco_inst, jr_inst;
int i;
wr_reg32(&ctrl->mcr, state->mcr);
wr_reg32(&ctrl->scfgr, state->scfgr);
deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
for (i = 0; i < deco_inst; i++) {
wr_reg32(&ctrl->deco_mid[i].liodn_ms,
state->deco_mid[i].liodn_ms);
wr_reg32(&ctrl->deco_mid[i].liodn_ls,
state->deco_mid[i].liodn_ls);
}
jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
for (i = 0; i < jr_inst; i++) {
wr_reg32(&ctrl->jr_mid[i].liodn_ms,
state->jr_mid[i].liodn_ms);
wr_reg32(&ctrl->jr_mid[i].liodn_ls,
state->jr_mid[i].liodn_ls);
}
if (ctrlpriv->virt_en == 1)
clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
}
static int caam_ctrl_suspend(struct device *dev)
{
const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
caam_state_save(dev);
return 0;
}
static int caam_ctrl_resume(struct device *dev)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
int ret = 0;
if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
caam_state_restore(dev);
/* HW and rng will be reset so deinstantiation can be removed */
devm_remove_action(dev, devm_deinstantiate_rng, dev);
ret = caam_ctrl_rng_init(dev);
}
return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
@ -771,6 +875,8 @@ static int caam_probe(struct platform_device *pdev)
caam_imx = (bool)imx_soc_match;
ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm();
if (imx_soc_match) {
/*
* Until Layerscape and i.MX OP-TEE get in sync,
@ -1033,6 +1139,7 @@ static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
.of_match_table = caam_match,
.pm = pm_ptr(&caam_ctrl_pm_ops),
},
.probe = caam_probe,
};
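
The controller half of caam's new power management is wired in with two one-liners: DEFINE_SIMPLE_DEV_PM_OPS() builds the dev_pm_ops, and pm_ptr() makes the driver reference compile away when CONFIG_PM is disabled. The same skeleton for a hypothetical platform driver::

  #include <linux/platform_device.h>
  #include <linux/pm.h>

  static int demo_suspend(struct device *dev)
  {
          /* save whatever state the hardware loses while off */
          return 0;
  }

  static int demo_resume(struct device *dev)
  {
          /* restore the saved state, re-init anything that was reset */
          return 0;
  }

  static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

  static struct platform_driver demo_driver = {
          .driver = {
                  .name = "demo",                 /* placeholder */
                  .pm   = pm_ptr(&demo_pm_ops),   /* NULL if !CONFIG_PM */
          },
  };
  module_platform_driver(demo_driver);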


@ -4,7 +4,7 @@
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
* Copyright 2019 NXP
* Copyright 2019, 2023 NXP
*/
#ifndef INTERN_H
@ -47,6 +47,16 @@ struct caam_jrentry_info {
u32 desc_size; /* Stored size for postprocessing, header derived */
};
struct caam_jr_state {
dma_addr_t inpbusaddr;
dma_addr_t outbusaddr;
};
struct caam_jr_dequeue_params {
struct device *dev;
int enable_itr;
};
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct list_head list_node; /* Job Ring device list */
@ -54,6 +64,7 @@ struct caam_drv_private_jr {
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
struct caam_jr_dequeue_params tasklet_params;
int irq; /* One per queue */
bool hwrng;
@ -71,6 +82,15 @@ struct caam_drv_private_jr {
int tail; /* entinfo (s/w ring) tail index */
void *outring; /* Base of output ring, DMA-safe */
struct crypto_engine *engine;
struct caam_jr_state state; /* State of the JR during PM */
};
struct caam_ctl_state {
struct masterid deco_mid[16];
struct masterid jr_mid[4];
u32 mcr;
u32 scfgr;
};
/*
@ -116,6 +136,9 @@ struct caam_drv_private {
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
#endif
int caam_off_during_pm; /* If the CAAM is reset after suspend */
struct caam_ctl_state state; /* State of the CTL during PM */
};
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API


@ -9,6 +9,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "compat.h"
#include "ctrl.h"
@ -117,6 +118,23 @@ static int caam_jr_flush(struct device *dev)
return caam_jr_stop_processing(dev, JRCR_RESET);
}
/* The resume can be used after a park or a flush if CAAM has not been reset */
static int caam_jr_restart_processing(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) &
JRINT_ERR_HALT_MASK;
/* Check that the flush/park is completed */
if (halt_status != JRINT_ERR_HALT_COMPLETE)
return -1;
/* Resume processing of jobs */
clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE);
return 0;
}
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
@ -215,7 +233,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
* tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
if (!(irqstate & JRINT_JR_INT))
return IRQ_NONE;
/*
@ -245,7 +263,8 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
struct device *dev = (struct device *)devarg;
struct caam_jr_dequeue_params *params = (void *)devarg;
struct device *dev = params->dev;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
@ -319,8 +338,9 @@ static void caam_jr_dequeue(unsigned long devarg)
outring_used--;
}
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
if (params->enable_itr)
/* reenable / unmask IRQs */
clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
/**
@ -445,8 +465,16 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
* Guarantee that the descriptor's DMA address has been written to
* the next slot in the ring before the write index is updated, since
* other cores may update this index independently.
*
* Under heavy DDR load, smp_wmb() or dma_wmb() do not guarantee that
* the input ring is updated before the CAAM starts reading it, so the
* CAAM may process a stale descriptor address again and put it in the
* output ring. That causes caam_jr_dequeue() to fail, since the old
* descriptor is not in the software ring.
* To fix this, use wmb(), which orders stores for the full system
* instead of only the inner/outer shareable domains.
*/
smp_wmb();
wmb();
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
@ -470,6 +498,29 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
}
EXPORT_SYMBOL(caam_jr_enqueue);
static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr,
dma_addr_t outbusaddr)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
wr_reg64(&jrp->rregs->outring_base, outbusaddr);
wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
/* Select interrupt coalescing parameters */
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
(JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
(JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
}
static void caam_jr_reset_index(struct caam_drv_private_jr *jrp)
{
jrp->out_ring_read_index = 0;
jrp->head = 0;
jrp->tail = 0;
}
/*
* Init JobR independent of platform property detection
*/
@ -506,25 +557,16 @@ static int caam_jr_init(struct device *dev)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
jrp->out_ring_read_index = 0;
jrp->head = 0;
jrp->tail = 0;
wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
wr_reg64(&jrp->rregs->outring_base, outbusaddr);
wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
caam_jr_reset_index(jrp);
jrp->inpring_avail = JOBR_DEPTH;
caam_jr_init_hw(dev, inpbusaddr, outbusaddr);
spin_lock_init(&jrp->inplock);
/* Select interrupt coalescing parameters */
clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
(JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
(JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
jrp->tasklet_params.dev = dev;
jrp->tasklet_params.enable_itr = 1;
tasklet_init(&jrp->irqtask, caam_jr_dequeue,
(unsigned long)&jrp->tasklet_params);
/* Connect job ring interrupt handler. */
error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
@ -635,11 +677,134 @@ static int caam_jr_probe(struct platform_device *pdev)
atomic_set(&jrpriv->tfm_count, 0);
device_init_wakeup(&pdev->dev, 1);
device_set_wakeup_enable(&pdev->dev, false);
register_algs(jrpriv, jrdev->parent);
return 0;
}
static void caam_jr_get_hw_state(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
}
static int caam_jr_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
struct caam_jr_dequeue_params suspend_params = {
.dev = dev,
.enable_itr = 0,
};
/* Remove the node from Physical JobR list maintained by driver */
spin_lock(&driver_data.jr_alloc_lock);
list_del(&jrpriv->list_node);
spin_unlock(&driver_data.jr_alloc_lock);
if (jrpriv->hwrng)
caam_rng_exit(dev->parent);
if (ctrlpriv->caam_off_during_pm) {
int err;
tasklet_disable(&jrpriv->irqtask);
/* mask interrupts so the ring can be flushed */
clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK);
/* Invalid job in process */
err = caam_jr_flush(dev);
if (err) {
dev_err(dev, "Failed to flush\n");
return err;
}
/* Dequeue the flushed jobs */
caam_jr_dequeue((unsigned long)&suspend_params);
/* Save state */
caam_jr_get_hw_state(dev);
} else if (device_may_wakeup(&pdev->dev)) {
enable_irq_wake(jrpriv->irq);
}
return 0;
}
static int caam_jr_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
if (ctrlpriv->caam_off_during_pm) {
u64 inp_addr;
int err;
/*
* Check whether the CAAM has been reset by checking the address
* of the input ring
*/
inp_addr = rd_reg64(&jrpriv->rregs->inpring_base);
if (inp_addr != 0) {
/* JR still has some configuration */
if (inp_addr == jrpriv->state.inpbusaddr) {
/* JR has not been reset */
err = caam_jr_restart_processing(dev);
if (err) {
dev_err(dev,
"Restart processing failed\n");
return err;
}
tasklet_enable(&jrpriv->irqtask);
clrsetbits_32(&jrpriv->rregs->rconfig_lo,
JRCFG_IMSK, 0);
goto add_jr;
} else if (ctrlpriv->optee_en) {
/* JR has been used by OPTEE, reset it */
err = caam_reset_hw_jr(dev);
if (err) {
dev_err(dev, "Failed to reset JR\n");
return err;
}
} else {
/* No explanation, return error */
return -EIO;
}
}
caam_jr_reset_index(jrpriv);
caam_jr_init_hw(dev, jrpriv->state.inpbusaddr,
jrpriv->state.outbusaddr);
tasklet_enable(&jrpriv->irqtask);
} else if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(jrpriv->irq);
}
add_jr:
spin_lock(&driver_data.jr_alloc_lock);
list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
spin_unlock(&driver_data.jr_alloc_lock);
if (jrpriv->hwrng)
jrpriv->hwrng = !caam_rng_init(dev->parent);
return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume);
static const struct of_device_id caam_jr_match[] = {
{
.compatible = "fsl,sec-v4.0-job-ring",
@ -655,6 +820,7 @@ static struct platform_driver caam_jr_driver = {
.driver = {
.name = "caam_jr",
.of_match_table = caam_jr_match,
.pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
.remove = caam_jr_remove,
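
The barrier change in caam_jr_enqueue() above deserves a second look: the ring is consumed by the CAAM's DMA engine, not by another CPU, so the descriptor store must be ordered before the index store for every observer in the system. Schematically, with a hypothetical ring layout::

  #include <asm/barrier.h>
  #include <linux/types.h>

  #define DEMO_RING_DEPTH 128     /* power of two, made up */

  struct demo_ring {
          dma_addr_t slots[DEMO_RING_DEPTH];
          u32 head;               /* polled by the device */
  };

  static void demo_ring_enqueue(struct demo_ring *r, dma_addr_t desc)
  {
          r->slots[r->head] = desc;               /* store 1: payload */

          /*
           * wmb() orders store 1 before store 2 for all observers,
           * DMA engines included; smp_wmb() only covers CPU vs CPU.
           */
          wmb();

          r->head = (r->head + 1) & (DEMO_RING_DEPTH - 1); /* store 2 */
  }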


@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <soc/fsl/qman.h>


@ -459,12 +459,6 @@ struct masterid {
u32 liodn_ls; /* LIODN for non-sequence and seq access */
};
/* Partition ID for DMA configuration */
struct partid {
u32 rsvd1;
u32 pidr; /* partition ID, DECO */
};
/* RNGB test mode (replicated twice in some configurations) */
/* Padded out to 0x100 */
struct rngtst {
@ -590,8 +584,7 @@ struct caam_ctrl {
u32 deco_rsr; /* DECORSR - Deco Request Source */
u32 rsvd11;
u32 deco_rq; /* DECORR - DECO Request */
struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
u32 rsvd5[22];
struct masterid deco_mid[16]; /* DECOxLIODNR - 1 per DECO */
/* DECO Availability/Reset Section 120-3ff */
u32 deco_avail; /* DAR - DECO availability */


@ -11,7 +11,8 @@ ccp-$(CONFIG_PCI) += sp-pci.o
ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
sev-dev.o \
tee-dev.o \
platform-access.o
platform-access.o \
dbc.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \

drivers/crypto/ccp/dbc.c (new file, 250 lines)

@ -0,0 +1,250 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* AMD Secure Processor Dynamic Boost Control interface
*
* Copyright (C) 2023 Advanced Micro Devices, Inc.
*
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
#include "dbc.h"
struct error_map {
u32 psp;
int ret;
};
#define DBC_ERROR_ACCESS_DENIED 0x0001
#define DBC_ERROR_EXCESS_DATA 0x0004
#define DBC_ERROR_BAD_PARAMETERS 0x0006
#define DBC_ERROR_BAD_STATE 0x0007
#define DBC_ERROR_NOT_IMPLEMENTED 0x0009
#define DBC_ERROR_BUSY 0x000D
#define DBC_ERROR_MESSAGE_FAILURE 0x0307
#define DBC_ERROR_OVERFLOW 0x300F
#define DBC_ERROR_SIGNATURE_INVALID 0x3072
static struct error_map error_codes[] = {
{DBC_ERROR_ACCESS_DENIED, -EACCES},
{DBC_ERROR_EXCESS_DATA, -E2BIG},
{DBC_ERROR_BAD_PARAMETERS, -EINVAL},
{DBC_ERROR_BAD_STATE, -EAGAIN},
{DBC_ERROR_MESSAGE_FAILURE, -ENOENT},
{DBC_ERROR_NOT_IMPLEMENTED, -ENOENT},
{DBC_ERROR_BUSY, -EBUSY},
{DBC_ERROR_OVERFLOW, -ENFILE},
{DBC_ERROR_SIGNATURE_INVALID, -EPERM},
{0x0, 0x0},
};
static int send_dbc_cmd(struct psp_dbc_device *dbc_dev,
enum psp_platform_access_msg msg)
{
int ret;
dbc_dev->mbox->req.header.status = 0;
ret = psp_send_platform_access_msg(msg, (struct psp_request *)dbc_dev->mbox);
if (ret == -EIO) {
int i;
dev_dbg(dbc_dev->dev,
"msg 0x%x failed with PSP error: 0x%x\n",
msg, dbc_dev->mbox->req.header.status);
for (i = 0; error_codes[i].psp; i++) {
if (dbc_dev->mbox->req.header.status == error_codes[i].psp)
return error_codes[i].ret;
}
}
return ret;
}
static int send_dbc_nonce(struct psp_dbc_device *dbc_dev)
{
int ret;
dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_nonce);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
if (ret == -EAGAIN) {
dev_dbg(dbc_dev->dev, "retrying get nonce\n");
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
}
return ret;
}
static int send_dbc_parameter(struct psp_dbc_device *dbc_dev)
{
dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_param);
switch (dbc_dev->mbox->dbc_param.user.msg_index) {
case PARAM_SET_FMAX_CAP:
case PARAM_SET_PWR_CAP:
case PARAM_SET_GFX_MODE:
return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_PARAMETER);
case PARAM_GET_FMAX_CAP:
case PARAM_GET_PWR_CAP:
case PARAM_GET_CURR_TEMP:
case PARAM_GET_FMAX_MAX:
case PARAM_GET_FMAX_MIN:
case PARAM_GET_SOC_PWR_MAX:
case PARAM_GET_SOC_PWR_MIN:
case PARAM_GET_SOC_PWR_CUR:
case PARAM_GET_GFX_MODE:
return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_PARAMETER);
}
return -EINVAL;
}
void dbc_dev_destroy(struct psp_device *psp)
{
struct psp_dbc_device *dbc_dev = psp->dbc_data;
if (!dbc_dev)
return;
misc_deregister(&dbc_dev->char_dev);
mutex_destroy(&dbc_dev->ioctl_mutex);
psp->dbc_data = NULL;
}
static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct psp_device *psp_master = psp_get_master_device();
void __user *argp = (void __user *)arg;
struct psp_dbc_device *dbc_dev;
int ret;
if (!psp_master || !psp_master->dbc_data)
return -ENODEV;
dbc_dev = psp_master->dbc_data;
mutex_lock(&dbc_dev->ioctl_mutex);
switch (cmd) {
case DBCIOCNONCE:
if (copy_from_user(&dbc_dev->mbox->dbc_nonce.user, argp,
sizeof(struct dbc_user_nonce))) {
ret = -EFAULT;
goto unlock;
}
ret = send_dbc_nonce(dbc_dev);
if (ret)
goto unlock;
if (copy_to_user(argp, &dbc_dev->mbox->dbc_nonce.user,
sizeof(struct dbc_user_nonce))) {
ret = -EFAULT;
goto unlock;
}
break;
case DBCIOCUID:
dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_set_uid);
if (copy_from_user(&dbc_dev->mbox->dbc_set_uid.user, argp,
sizeof(struct dbc_user_setuid))) {
ret = -EFAULT;
goto unlock;
}
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
goto unlock;
if (copy_to_user(argp, &dbc_dev->mbox->dbc_set_uid.user,
sizeof(struct dbc_user_setuid))) {
ret = -EFAULT;
goto unlock;
}
break;
case DBCIOCPARAM:
if (copy_from_user(&dbc_dev->mbox->dbc_param.user, argp,
sizeof(struct dbc_user_param))) {
ret = -EFAULT;
goto unlock;
}
ret = send_dbc_parameter(dbc_dev);
if (ret)
goto unlock;
if (copy_to_user(argp, &dbc_dev->mbox->dbc_param.user,
sizeof(struct dbc_user_param))) {
ret = -EFAULT;
goto unlock;
}
break;
default:
ret = -EINVAL;
}
unlock:
mutex_unlock(&dbc_dev->ioctl_mutex);
return ret;
}
static const struct file_operations dbc_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dbc_ioctl,
};
int dbc_dev_init(struct psp_device *psp)
{
struct device *dev = psp->dev;
struct psp_dbc_device *dbc_dev;
int ret;
if (!PSP_FEATURE(psp, DBC))
return 0;
dbc_dev = devm_kzalloc(dev, sizeof(*dbc_dev), GFP_KERNEL);
if (!dbc_dev)
return -ENOMEM;
BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
if (!dbc_dev->mbox) {
ret = -ENOMEM;
goto cleanup_dev;
}
psp->dbc_data = dbc_dev;
dbc_dev->dev = dev;
ret = send_dbc_nonce(dbc_dev);
if (ret == -EACCES) {
dev_dbg(dbc_dev->dev,
"dynamic boost control was previously authenticated\n");
ret = 0;
}
dev_dbg(dbc_dev->dev, "dynamic boost control is %savailable\n",
ret ? "un" : "");
if (ret) {
ret = 0;
goto cleanup_mbox;
}
dbc_dev->char_dev.minor = MISC_DYNAMIC_MINOR;
dbc_dev->char_dev.name = "dbc";
dbc_dev->char_dev.fops = &dbc_fops;
dbc_dev->char_dev.mode = 0600;
ret = misc_register(&dbc_dev->char_dev);
if (ret)
goto cleanup_mbox;
mutex_init(&dbc_dev->ioctl_mutex);
return 0;
cleanup_mbox:
devm_free_pages(dev, (unsigned long)dbc_dev->mbox);
cleanup_dev:
psp->dbc_data = NULL;
devm_kfree(dev, dbc_dev);
return ret;
}
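
Everything dbc.c exposes goes through the three ioctls above on the misc device it registers as "dbc". A minimal userspace sketch of fetching a nonce; whether any request fields must be pre-seeded is not visible in this hunk, so the struct is simply zeroed here::

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/psp-dbc.h>      /* DBCIOCNONCE, struct dbc_user_nonce */

  int main(void)
  {
          struct dbc_user_nonce nonce;
          int fd = open("/dev/dbc", O_RDWR);

          if (fd < 0) {
                  perror("open /dev/dbc");
                  return 1;
          }

          memset(&nonce, 0, sizeof(nonce));
          if (ioctl(fd, DBCIOCNONCE, &nonce) < 0)
                  perror("DBCIOCNONCE");  /* errno follows the map above */
          else
                  puts("fetched dynamic boost control nonce");

          close(fd);
          return 0;
  }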

drivers/crypto/ccp/dbc.h (new file, 56 lines)

@ -0,0 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* AMD Platform Security Processor (PSP) Dynamic Boost Control support
*
* Copyright (C) 2023 Advanced Micro Devices, Inc.
*
* Author: Mario Limonciello <mario.limonciello@amd.com>
*/
#ifndef __DBC_H__
#define __DBC_H__
#include <uapi/linux/psp-dbc.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/psp-platform-access.h>
#include "psp-dev.h"
struct psp_dbc_device {
struct device *dev;
union dbc_buffer *mbox;
struct mutex ioctl_mutex;
struct miscdevice char_dev;
};
struct dbc_nonce {
struct psp_req_buffer_hdr header;
struct dbc_user_nonce user;
} __packed;
struct dbc_set_uid {
struct psp_req_buffer_hdr header;
struct dbc_user_setuid user;
} __packed;
struct dbc_param {
struct psp_req_buffer_hdr header;
struct dbc_user_param user;
} __packed;
union dbc_buffer {
struct psp_request req;
struct dbc_nonce dbc_nonce;
struct dbc_set_uid dbc_set_uid;
struct dbc_param dbc_param;
};
void dbc_dev_destroy(struct psp_device *psp);
int dbc_dev_init(struct psp_device *psp);
#endif /* __DBC_H__ */


@ -15,6 +15,7 @@
#include "sev-dev.h"
#include "tee-dev.h"
#include "platform-access.h"
#include "dbc.h"
struct psp_device *psp_master;
@ -112,6 +113,12 @@ static void psp_init_platform_access(struct psp_device *psp)
dev_warn(psp->dev, "platform access init failed: %d\n", ret);
return;
}
/* dbc must come after platform access as it tests the feature */
ret = dbc_dev_init(psp);
if (ret)
dev_warn(psp->dev, "failed to init dynamic boost control: %d\n",
ret);
}
static int psp_init(struct psp_device *psp)
@ -173,13 +180,14 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
/* master device must be set for platform access */
if (psp->sp->set_psp_master_device)
psp->sp->set_psp_master_device(psp->sp);
ret = psp_init(psp);
if (ret)
goto e_irq;
if (sp->set_psp_master_device)
sp->set_psp_master_device(sp);
/* Enable interrupt */
iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
@ -188,6 +196,9 @@ int psp_dev_init(struct sp_device *sp)
return 0;
e_irq:
if (sp->clear_psp_master_device)
sp->clear_psp_master_device(sp);
sp_free_psp_irq(psp->sp, psp);
e_err:
sp->psp_data = NULL;
@ -213,6 +224,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
dbc_dev_destroy(psp);
platform_access_dev_destroy(psp);
sp_free_psp_irq(sp, psp);


@ -40,6 +40,7 @@ struct psp_device {
void *sev_data;
void *tee_data;
void *platform_access_data;
void *dbc_data;
unsigned int capability;
};


@ -28,6 +28,10 @@
#define CACHE_NONE 0x00
#define CACHE_WB_NO_ALLOC 0xb7
#define PLATFORM_FEATURE_DBC 0x1
#define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat)
/* Structure to hold CCP device data */
struct ccp_device;
struct ccp_vdata {
@ -51,6 +55,7 @@ struct tee_vdata {
const unsigned int cmdbuff_addr_hi_reg;
const unsigned int ring_wptr_reg;
const unsigned int ring_rptr_reg;
const unsigned int info_reg;
};
struct platform_access_vdata {
@ -69,6 +74,8 @@ struct psp_vdata {
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
const unsigned int bootloader_info_reg;
const unsigned int platform_features;
};
/* Structure to hold SP device data */


@ -8,6 +8,7 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@ -24,6 +25,12 @@
#include "ccp-dev.h"
#include "psp-dev.h"
/* used for version string AA.BB.CC.DD */
#define AA GENMASK(31, 24)
#define BB GENMASK(23, 16)
#define CC GENMASK(15, 8)
#define DD GENMASK(7, 0)
#define MSIX_VECTORS 2
struct sp_pci {
@ -32,7 +39,7 @@ struct sp_pci {
};
static struct sp_device *sp_dev_master;
#define attribute_show(name, def) \
#define security_attribute_show(name, def) \
static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
@ -42,24 +49,24 @@ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \
}
attribute_show(fused_part, FUSED_PART)
security_attribute_show(fused_part, FUSED_PART)
static DEVICE_ATTR_RO(fused_part);
attribute_show(debug_lock_on, DEBUG_LOCK_ON)
security_attribute_show(debug_lock_on, DEBUG_LOCK_ON)
static DEVICE_ATTR_RO(debug_lock_on);
attribute_show(tsme_status, TSME_STATUS)
security_attribute_show(tsme_status, TSME_STATUS)
static DEVICE_ATTR_RO(tsme_status);
attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
security_attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
static DEVICE_ATTR_RO(anti_rollback_status);
attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
security_attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
static DEVICE_ATTR_RO(rpmc_production_enabled);
attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
security_attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
static DEVICE_ATTR_RO(rpmc_spirom_available);
attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
security_attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
static DEVICE_ATTR_RO(hsp_tpm_available);
attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
security_attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
static DEVICE_ATTR_RO(rom_armor_enforced);
static struct attribute *psp_attrs[] = {
static struct attribute *psp_security_attrs[] = {
&dev_attr_fused_part.attr,
&dev_attr_debug_lock_on.attr,
&dev_attr_tsme_status.attr,
@ -83,13 +90,70 @@ static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *a
return 0;
}
static struct attribute_group psp_attr_group = {
.attrs = psp_attrs,
static struct attribute_group psp_security_attr_group = {
.attrs = psp_security_attrs,
.is_visible = psp_security_is_visible,
};
#define version_attribute_show(name, _offset) \
static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
struct sp_device *sp = dev_get_drvdata(d); \
struct psp_device *psp = sp->psp_data; \
unsigned int val = ioread32(psp->io_regs + _offset); \
return sysfs_emit(buf, "%02lx.%02lx.%02lx.%02lx\n", \
FIELD_GET(AA, val), \
FIELD_GET(BB, val), \
FIELD_GET(CC, val), \
FIELD_GET(DD, val)); \
}
version_attribute_show(bootloader_version, psp->vdata->bootloader_info_reg)
static DEVICE_ATTR_RO(bootloader_version);
version_attribute_show(tee_version, psp->vdata->tee->info_reg)
static DEVICE_ATTR_RO(tee_version);
static struct attribute *psp_firmware_attrs[] = {
&dev_attr_bootloader_version.attr,
&dev_attr_tee_version.attr,
NULL,
};
static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct sp_device *sp = dev_get_drvdata(dev);
struct psp_device *psp = sp->psp_data;
unsigned int val = 0xffffffff;
if (!psp)
return 0;
if (attr == &dev_attr_bootloader_version.attr &&
psp->vdata->bootloader_info_reg)
val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg);
if (attr == &dev_attr_tee_version.attr &&
psp->capability & PSP_CAPABILITY_TEE &&
psp->vdata->tee->info_reg)
val = ioread32(psp->io_regs + psp->vdata->tee->info_reg);
/* If platform disallows accessing this register it will be all f's */
if (val != 0xffffffff)
return 0444;
return 0;
}
static struct attribute_group psp_firmware_attr_group = {
.attrs = psp_firmware_attrs,
.is_visible = psp_firmware_is_visible,
};
static const struct attribute_group *psp_groups[] = {
&psp_attr_group,
&psp_security_attr_group,
&psp_firmware_attr_group,
NULL,
};
@ -359,6 +423,7 @@ static const struct tee_vdata teev1 = {
.cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
.ring_wptr_reg = 0x10550, /* C2PMSG_20 */
.ring_rptr_reg = 0x10554, /* C2PMSG_21 */
.info_reg = 0x109e8, /* C2PMSG_58 */
};
static const struct tee_vdata teev2 = {
@ -384,6 +449,7 @@ static const struct platform_access_vdata pa_v2 = {
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
.bootloader_info_reg = 0x105ec, /* C2PMSG_59 */
.feature_reg = 0x105fc, /* C2PMSG_63 */
.inten_reg = 0x10610, /* P2CMSG_INTEN */
.intsts_reg = 0x10614, /* P2CMSG_INTSTS */
@ -391,6 +457,7 @@ static const struct psp_vdata pspv1 = {
static const struct psp_vdata pspv2 = {
.sev = &sevv2,
.bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
@ -399,14 +466,17 @@ static const struct psp_vdata pspv2 = {
static const struct psp_vdata pspv3 = {
.tee = &teev1,
.platform_access = &pa_v1,
.bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
.platform_features = PLATFORM_FEATURE_DBC,
};
static const struct psp_vdata pspv4 = {
.sev = &sevv2,
.tee = &teev1,
.bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
.intsts_reg = 0x10694, /* P2CMSG_INTSTS */
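
The version attributes above pack four components into one 32-bit register and print them as AA.BB.CC.DD via GENMASK()/FIELD_GET(). The decode is plain shift-and-mask arithmetic; a standalone equivalent with an assumed register value::

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t val = 0x01020304;  /* assumed register contents */

          /* same extraction as FIELD_GET(GENMASK(31, 24), val), etc. */
          printf("%02x.%02x.%02x.%02x\n",
                 (val >> 24) & 0xff, (val >> 16) & 0xff,
                 (val >> 8) & 0xff, val & 0xff);
          return 0;                   /* prints 01.02.03.04 */
  }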


@ -14,7 +14,6 @@
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include "cc_driver.h"


@ -2216,7 +2216,8 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
memcpy(hmacctx->ipad, key, keylen);
}
memset(hmacctx->ipad + keylen, 0, bs - keylen);
memcpy(hmacctx->opad, hmacctx->ipad, bs);
unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
"fortified memcpy causes -Wrestrict warning");
for (i = 0; i < bs / sizeof(int); i++) {
*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
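
This chcr change, like the bcm one earlier, swaps memcpy() for unsafe_memcpy() because ipad and opad are adjacent fixed-size members of one context struct, and a fortified memcpy() between them trips -Wrestrict even though the copy is fully bounded. The shape of the workaround, on a made-up context::

  #include <linux/fortify-string.h>
  #include <linux/types.h>

  #define DEMO_BLOCK_SIZE 64              /* hypothetical block size */

  struct demo_hmac_ctx {
          u8 ipad[DEMO_BLOCK_SIZE];
          u8 opad[DEMO_BLOCK_SIZE];
  };

  static void demo_copy_pads(struct demo_hmac_ctx *ctx)
  {
          /*
           * unsafe_memcpy() is memcpy() minus the fortify checks; the
           * last argument documents why skipping them is justified.
           */
          unsafe_memcpy(ctx->opad, ctx->ipad, DEMO_BLOCK_SIZE,
                        "fortified memcpy causes -Wrestrict warning");
  }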


@ -133,7 +133,6 @@ int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl);
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
#endif /* __CHCR_CORE_H__ */


@ -344,7 +344,6 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
struct cipher_wr_param *wrparam,
unsigned short qid);
int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
struct hash_wr_param *param);
int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);


@ -15,7 +15,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <crypto/internal/rng.h>
@ -277,7 +277,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
if (!rng)
return -ENOMEM;
rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
rng->type = (uintptr_t)of_device_get_match_data(&pdev->dev);
mutex_init(&rng->lock);
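
The exynos-rng cast above is the portable way to carry a small integer through the match table's void * .data field: cast in with (void *), cast out through uintptr_t rather than straight to the enum. A sketch with illustrative compatibles, using device_get_match_data() from <linux/property.h>, which works the same way::

  #include <linux/mod_devicetable.h>
  #include <linux/platform_device.h>
  #include <linux/property.h>

  enum demo_variant { DEMO_V1 = 1, DEMO_V2 };

  static const struct of_device_id demo_of_match[] = {
          { .compatible = "vendor,demo-v1", .data = (void *)DEMO_V1 },
          { .compatible = "vendor,demo-v2", .data = (void *)DEMO_V2 },
          { }
  };

  static int demo_probe(struct platform_device *pdev)
  {
          /* pointer -> uintptr_t -> enum: no truncation warnings */
          enum demo_variant variant =
                  (uintptr_t)device_get_match_data(&pdev->dev);

          dev_info(&pdev->dev, "variant %d\n", variant);
          return 0;
  }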


@ -8,13 +8,17 @@
* ECB mode.
*/
#include <linux/crypto.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sl3516-ce.h"
/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
@ -105,7 +109,7 @@ static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
struct sl3516_ce_alg_template *algt;
int err;
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
algt->stat_fb++;
skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@ -136,7 +140,7 @@ static int sl3516_ce_cipher(struct skcipher_request *areq)
int err = 0;
int i;
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
crypto_tfm_alg_name(areq->base.tfm),
@ -258,7 +262,7 @@ theend:
return err;
}
static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
int err;
struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@ -318,7 +322,7 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
op->ce = algt->ce;
op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@ -335,10 +339,6 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
crypto_tfm_alg_driver_name(&sktfm->base),
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
op->enginectx.op.prepare_request = NULL;
op->enginectx.op.unprepare_request = NULL;
err = pm_runtime_get_sync(op->ce->dev);
if (err < 0)
goto error_pm;


@ -6,22 +6,24 @@
*
* Core file which registers crypto algorithms supported by the CryptoEngine
*/
#include <crypto/engine.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include "sl3516-ce.h"
@ -217,7 +219,7 @@ static struct sl3516_ce_alg_template ce_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.mode = ECB_AES,
.alg.skcipher = {
.alg.skcipher.base = {
.base = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-sl3516",
@ -236,11 +238,13 @@ static struct sl3516_ce_alg_template ce_algs[] = {
.setkey = sl3516_ce_aes_setkey,
.encrypt = sl3516_ce_skencrypt,
.decrypt = sl3516_ce_skdecrypt,
}
},
.alg.skcipher.op = {
.do_one_request = sl3516_ce_handle_cipher_request,
},
},
};
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
struct sl3516_ce_dev *ce = seq->private;
@ -264,8 +268,8 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
ce_algs[i].alg.skcipher.base.cra_driver_name,
ce_algs[i].alg.skcipher.base.cra_name,
ce_algs[i].alg.skcipher.base.base.cra_driver_name,
ce_algs[i].alg.skcipher.base.base.cra_name,
ce_algs[i].stat_req, ce_algs[i].stat_fb);
break;
}
@ -274,7 +278,6 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
#endif
static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
@ -286,11 +289,11 @@ static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "DEBUG: Register %s\n",
ce_algs[i].alg.skcipher.base.cra_name);
err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
ce_algs[i].alg.skcipher.base.base.cra_name);
err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
if (err) {
dev_err(ce->dev, "Fail to register %s\n",
ce_algs[i].alg.skcipher.base.cra_name);
ce_algs[i].alg.skcipher.base.base.cra_name);
ce_algs[i].ce = NULL;
return err;
}
@ -313,8 +316,8 @@ static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
switch (ce_algs[i].type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
dev_info(ce->dev, "Unregister %d %s\n", i,
ce_algs[i].alg.skcipher.base.cra_name);
crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
ce_algs[i].alg.skcipher.base.base.cra_name);
crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
break;
}
}
@ -473,13 +476,20 @@ static int sl3516_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) {
struct dentry *dbgfs_dir __maybe_unused;
struct dentry *dbgfs_stats __maybe_unused;
/* Ignore error of debugfs */
dbgfs_dir = debugfs_create_dir("sl3516", NULL);
dbgfs_stats = debugfs_create_file("stats", 0444,
dbgfs_dir, ce,
&sl3516_ce_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
/* Ignore error of debugfs */
ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL);
ce->dbgfs_stats = debugfs_create_file("stats", 0444,
ce->dbgfs_dir, ce,
&sl3516_ce_debugfs_fops);
ce->dbgfs_dir = dbgfs_dir;
ce->dbgfs_stats = dbgfs_stats;
#endif
}
return 0;
error_pmuse:


@ -17,7 +17,6 @@
#include <crypto/engine.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/hw_random.h>
@ -292,16 +291,12 @@ struct sl3516_ce_cipher_req_ctx {
/*
* struct sl3516_ce_cipher_tfm_ctx - context for a skcipher TFM
* @enginectx: crypto_engine used by this TFM
* @key: pointer to key data
* @keylen: len of the key
* @ce: pointer to the private data of driver handling this TFM
* @fallback_tfm: pointer to the fallback TFM
*
* enginectx must be the first element
*/
struct sl3516_ce_cipher_tfm_ctx {
struct crypto_engine_ctx enginectx;
u32 *key;
u32 keylen;
struct sl3516_ce_dev *ce;
@ -324,7 +319,7 @@ struct sl3516_ce_alg_template {
u32 mode;
struct sl3516_ce_dev *ce;
union {
struct skcipher_alg skcipher;
struct skcipher_engine_alg skcipher;
} alg;
unsigned long stat_req;
unsigned long stat_fb;
@ -345,3 +340,4 @@ int sl3516_ce_run_task(struct sl3516_ce_dev *ce,
int sl3516_ce_rng_register(struct sl3516_ce_dev *ce);
void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce);
int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq);


@ -1392,9 +1392,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
unsigned int sz, sz_shift, curve_sz;
struct device *dev = ctx->dev;
char key[HPRE_ECC_MAX_KSZ];
unsigned int sz, sz_shift;
struct ecdh params;
int ret;
@ -1406,7 +1406,13 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
/* Use stdrng to generate private key */
if (!params.key || !params.key_size) {
params.key = key;
params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
if (!curve_sz) {
dev_err(dev, "Invalid curve size!\n");
return -EINVAL;
}
params.key_size = curve_sz - 1;
ret = ecdh_gen_privkey(ctx, &params);
if (ret)
return ret;


@ -209,7 +209,7 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
@ -275,6 +275,9 @@ static const struct hpre_hw_error hpre_hw_errors[] = {
}, {
.int_msk = BIT(23),
.msg = "sva_fsm_timeout_int_set"
}, {
.int_msk = BIT(24),
.msg = "sva_int_set"
}, {
/* sentinel */
}


@ -88,6 +88,8 @@
#define QM_DB_PRIORITY_SHIFT_V1 48
#define QM_PAGE_SIZE 0x0034
#define QM_QP_DB_INTERVAL 0x10000
#define QM_DB_TIMEOUT_CFG 0x100074
#define QM_DB_TIMEOUT_SET 0x1fffff
#define QM_MEM_START_INIT 0x100040
#define QM_MEM_INIT_DONE 0x100044
@ -954,6 +956,11 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
if (!val)
return IRQ_NONE;
if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
return IRQ_HANDLED;
}
schedule_work(&qm->cmd_process);
return IRQ_HANDLED;
@ -997,7 +1004,7 @@ static void qm_reset_function(struct hisi_qm *qm)
return;
}
ret = hisi_qm_stop(qm, QM_FLR);
ret = hisi_qm_stop(qm, QM_DOWN);
if (ret) {
dev_err(dev, "failed to stop qm when reset function\n");
goto clear_bit;
@ -2743,6 +2750,9 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
test_bit(QM_RESETTING, &qm->misc_ctl))
msleep(WAIT_PERIOD);
if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
flush_work(&qm->cmd_process);
udelay(REMOVE_WAIT_DELAY);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
@ -3243,7 +3253,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
if (qm->status.stop_reason == QM_SOFT_RESET ||
qm->status.stop_reason == QM_FLR) {
qm->status.stop_reason == QM_DOWN) {
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
ret = qm_stop_started_qp(qm);
if (ret < 0) {
@ -4539,11 +4549,11 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
if (qm->fun_type == QM_HW_PF)
qm_cmd_uninit(qm);
ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
if (ret)
pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
ret = hisi_qm_stop(qm, QM_FLR);
ret = hisi_qm_stop(qm, QM_DOWN);
if (ret) {
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
@ -4641,9 +4651,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
struct hisi_qm *qm = pci_get_drvdata(pdev);
int ret;
ret = hisi_qm_stop(qm, QM_NORMAL);
ret = hisi_qm_stop(qm, QM_DOWN);
if (ret)
dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
hisi_qm_cache_wb(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
@ -4807,7 +4819,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
cmd = msg & QM_MB_CMD_DATA_MASK;
switch (cmd) {
case QM_PF_FLR_PREPARE:
qm_pf_reset_vf_process(qm, QM_FLR);
qm_pf_reset_vf_process(qm, QM_DOWN);
break;
case QM_PF_SRST_PREPARE:
qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
@ -5371,6 +5383,8 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_pci_init;
if (qm->fun_type == QM_HW_PF) {
/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
if (ret) {
@ -5538,6 +5552,8 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
qm_cmd_init(qm);
hisi_qm_dev_err_init(qm);
/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
qm_disable_clock_gate(qm);
ret = qm_dev_mem_reset(qm);
if (ret)


@ -1107,8 +1107,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
}
queue->task_irq = platform_get_irq(to_platform_device(dev),
queue->queue_id * 2 + 1);
if (queue->task_irq <= 0) {
ret = -EINVAL;
if (queue->task_irq < 0) {
ret = queue->task_irq;
goto err_free_ring_db;
}
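
The hisi sec queue fix above is the canonical platform_get_irq() correction: the helper returns either a valid non-zero IRQ number or a negative errno, so the check is < 0 and the errno (which may be -EPROBE_DEFER) should be propagated, not replaced with -EINVAL. The idiomatic form::

  #include <linux/platform_device.h>

  static int demo_probe(struct platform_device *pdev)
  {
          int irq = platform_get_irq(pdev, 0);

          if (irq < 0)
                  return irq;     /* keeps -EPROBE_DEFER, -ENXIO, ... */

          /* ... devm_request_irq(&pdev->dev, irq, ...) ... */
          return 0;
  }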


@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
@ -1105,7 +1105,7 @@ static struct platform_driver img_hash_driver = {
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
.of_match_table = of_match_ptr(img_hash_match),
.of_match_table = img_hash_match,
}
};
module_platform_driver(img_hash_driver);


@ -5,24 +5,23 @@
* Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include "ocs-aes.h"
@ -38,7 +37,6 @@
/**
* struct ocs_aes_tctx - OCS AES Transform context
* @engine_ctx: Engine context.
* @aes_dev: The OCS AES device.
* @key: AES/SM4 key.
* @key_len: The length (in bytes) of @key.
@ -47,7 +45,6 @@
* @use_fallback: Whether or not fallback cipher should be used.
*/
struct ocs_aes_tctx {
struct crypto_engine_ctx engine_ctx;
struct ocs_aes_dev *aes_dev;
u8 key[OCS_AES_KEYSIZE_256];
unsigned int key_len;
@ -1148,15 +1145,6 @@ static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
}
static inline int ocs_common_init(struct ocs_aes_tctx *tctx)
{
tctx->engine_ctx.op.prepare_request = NULL;
tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request;
tctx->engine_ctx.op.unprepare_request = NULL;
return 0;
}
static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
@ -1172,16 +1160,14 @@ static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
return ocs_common_init(tctx);
return 0;
}
static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
{
struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
return ocs_common_init(tctx);
return 0;
}
static inline void clear_key(struct ocs_aes_tctx *tctx)
@ -1206,15 +1192,6 @@ static void ocs_exit_tfm(struct crypto_skcipher *tfm)
}
}
static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx)
{
tctx->engine_ctx.op.prepare_request = NULL;
tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request;
tctx->engine_ctx.op.unprepare_request = NULL;
return 0;
}
static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
{
const char *alg_name = crypto_tfm_alg_name(&tfm->base);
@ -1233,7 +1210,7 @@ static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
(sizeof(struct aead_request) +
crypto_aead_reqsize(tctx->sw_cipher.aead))));
return ocs_common_aead_init(tctx);
return 0;
}
static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
@ -1261,11 +1238,9 @@ static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
{
struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
return ocs_common_aead_init(tctx);
return 0;
}
static void ocs_aead_cra_exit(struct crypto_aead *tfm)
@ -1280,182 +1255,190 @@ static void ocs_aead_cra_exit(struct crypto_aead *tfm)
}
}
static struct skcipher_alg algs[] = {
static struct skcipher_engine_alg algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
{
.base.cra_name = "ecb(aes)",
.base.cra_driver_name = "ecb-aes-keembay-ocs",
.base.cra_priority = KMB_OCS_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.cra_module = THIS_MODULE,
.base.cra_alignmask = 0,
.base.base.cra_name = "ecb(aes)",
.base.base.cra_driver_name = "ecb-aes-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.min_keysize = OCS_AES_MIN_KEY_SIZE,
.max_keysize = OCS_AES_MAX_KEY_SIZE,
.setkey = kmb_ocs_aes_set_key,
.encrypt = kmb_ocs_aes_ecb_encrypt,
.decrypt = kmb_ocs_aes_ecb_decrypt,
.init = ocs_aes_init_tfm,
.exit = ocs_exit_tfm,
.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
.base.setkey = kmb_ocs_aes_set_key,
.base.encrypt = kmb_ocs_aes_ecb_encrypt,
.base.decrypt = kmb_ocs_aes_ecb_decrypt,
.base.init = ocs_aes_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
{
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-keembay-ocs",
.base.cra_priority = KMB_OCS_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.cra_module = THIS_MODULE,
.base.cra_alignmask = 0,
.base.base.cra_name = "cbc(aes)",
.base.base.cra_driver_name = "cbc-aes-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.min_keysize = OCS_AES_MIN_KEY_SIZE,
.max_keysize = OCS_AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = kmb_ocs_aes_set_key,
.encrypt = kmb_ocs_aes_cbc_encrypt,
.decrypt = kmb_ocs_aes_cbc_decrypt,
.init = ocs_aes_init_tfm,
.exit = ocs_exit_tfm,
.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_aes_set_key,
.base.encrypt = kmb_ocs_aes_cbc_encrypt,
.base.decrypt = kmb_ocs_aes_cbc_decrypt,
.base.init = ocs_aes_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "ctr-aes-keembay-ocs",
.base.cra_priority = KMB_OCS_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.cra_module = THIS_MODULE,
.base.cra_alignmask = 0,
.base.base.cra_name = "ctr(aes)",
.base.base.cra_driver_name = "ctr-aes-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.base.cra_blocksize = 1,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.min_keysize = OCS_AES_MIN_KEY_SIZE,
.max_keysize = OCS_AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = kmb_ocs_aes_set_key,
.encrypt = kmb_ocs_aes_ctr_encrypt,
.decrypt = kmb_ocs_aes_ctr_decrypt,
.init = ocs_aes_init_tfm,
.exit = ocs_exit_tfm,
.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_aes_set_key,
.base.encrypt = kmb_ocs_aes_ctr_encrypt,
.base.decrypt = kmb_ocs_aes_ctr_decrypt,
.base.init = ocs_aes_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
{
.base.cra_name = "cts(cbc(aes))",
.base.cra_driver_name = "cts-aes-keembay-ocs",
.base.cra_priority = KMB_OCS_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.cra_module = THIS_MODULE,
.base.cra_alignmask = 0,
.base.base.cra_name = "cts(cbc(aes))",
.base.base.cra_driver_name = "cts-aes-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY |
CRYPTO_ALG_NEED_FALLBACK,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.min_keysize = OCS_AES_MIN_KEY_SIZE,
.max_keysize = OCS_AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = kmb_ocs_aes_set_key,
.encrypt = kmb_ocs_aes_cts_encrypt,
.decrypt = kmb_ocs_aes_cts_decrypt,
.init = ocs_aes_init_tfm,
.exit = ocs_exit_tfm,
.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_aes_set_key,
.base.encrypt = kmb_ocs_aes_cts_encrypt,
.base.decrypt = kmb_ocs_aes_cts_decrypt,
.base.init = ocs_aes_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
{
.base.cra_name = "ecb(sm4)",
.base.cra_driver_name = "ecb-sm4-keembay-ocs",
.base.cra_priority = KMB_OCS_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.cra_module = THIS_MODULE,
.base.cra_alignmask = 0,
.base.base.cra_name = "ecb(sm4)",
.base.base.cra_driver_name = "ecb-sm4-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.min_keysize = OCS_SM4_KEY_SIZE,
.max_keysize = OCS_SM4_KEY_SIZE,
.setkey = kmb_ocs_sm4_set_key,
.encrypt = kmb_ocs_sm4_ecb_encrypt,
.decrypt = kmb_ocs_sm4_ecb_decrypt,
.init = ocs_sm4_init_tfm,
.exit = ocs_exit_tfm,
.base.min_keysize = OCS_SM4_KEY_SIZE,
.base.max_keysize = OCS_SM4_KEY_SIZE,
.base.setkey = kmb_ocs_sm4_set_key,
.base.encrypt = kmb_ocs_sm4_ecb_encrypt,
.base.decrypt = kmb_ocs_sm4_ecb_decrypt,
.base.init = ocs_sm4_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
{
.base.cra_name = "cbc(sm4)",
.base.cra_driver_name = "cbc-sm4-keembay-ocs",
.base.cra_priority = KMB_OCS_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.cra_module = THIS_MODULE,
.base.cra_alignmask = 0,
.base.base.cra_name = "cbc(sm4)",
.base.base.cra_driver_name = "cbc-sm4-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.min_keysize = OCS_SM4_KEY_SIZE,
.max_keysize = OCS_SM4_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = kmb_ocs_sm4_set_key,
.encrypt = kmb_ocs_sm4_cbc_encrypt,
.decrypt = kmb_ocs_sm4_cbc_decrypt,
.init = ocs_sm4_init_tfm,
.exit = ocs_exit_tfm,
.base.min_keysize = OCS_SM4_KEY_SIZE,
.base.max_keysize = OCS_SM4_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_sm4_set_key,
.base.encrypt = kmb_ocs_sm4_cbc_encrypt,
.base.decrypt = kmb_ocs_sm4_cbc_decrypt,
.base.init = ocs_sm4_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
{
.base.cra_name = "ctr(sm4)",
.base.cra_driver_name = "ctr-sm4-keembay-ocs",
.base.cra_priority = KMB_OCS_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.cra_module = THIS_MODULE,
.base.cra_alignmask = 0,
.base.base.cra_name = "ctr(sm4)",
.base.base.cra_driver_name = "ctr-sm4-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.base.cra_blocksize = 1,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.min_keysize = OCS_SM4_KEY_SIZE,
.max_keysize = OCS_SM4_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = kmb_ocs_sm4_set_key,
.encrypt = kmb_ocs_sm4_ctr_encrypt,
.decrypt = kmb_ocs_sm4_ctr_decrypt,
.init = ocs_sm4_init_tfm,
.exit = ocs_exit_tfm,
.base.min_keysize = OCS_SM4_KEY_SIZE,
.base.max_keysize = OCS_SM4_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_sm4_set_key,
.base.encrypt = kmb_ocs_sm4_ctr_encrypt,
.base.decrypt = kmb_ocs_sm4_ctr_decrypt,
.base.init = ocs_sm4_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
{
.base.cra_name = "cts(cbc(sm4))",
.base.cra_driver_name = "cts-sm4-keembay-ocs",
.base.cra_priority = KMB_OCS_PRIORITY,
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.cra_blocksize = AES_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.cra_module = THIS_MODULE,
.base.cra_alignmask = 0,
.base.base.cra_name = "cts(cbc(sm4))",
.base.base.cra_driver_name = "cts-sm4-keembay-ocs",
.base.base.cra_priority = KMB_OCS_PRIORITY,
.base.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.base.base.cra_blocksize = AES_BLOCK_SIZE,
.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
.base.base.cra_module = THIS_MODULE,
.base.base.cra_alignmask = 0,
.min_keysize = OCS_SM4_KEY_SIZE,
.max_keysize = OCS_SM4_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = kmb_ocs_sm4_set_key,
.encrypt = kmb_ocs_sm4_cts_encrypt,
.decrypt = kmb_ocs_sm4_cts_decrypt,
.init = ocs_sm4_init_tfm,
.exit = ocs_exit_tfm,
.base.min_keysize = OCS_SM4_KEY_SIZE,
.base.max_keysize = OCS_SM4_KEY_SIZE,
.base.ivsize = AES_BLOCK_SIZE,
.base.setkey = kmb_ocs_sm4_set_key,
.base.encrypt = kmb_ocs_sm4_cts_encrypt,
.base.decrypt = kmb_ocs_sm4_cts_decrypt,
.base.init = ocs_sm4_init_tfm,
.base.exit = ocs_exit_tfm,
.op.do_one_request = kmb_ocs_aes_sk_do_one_request,
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
};
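
Note: the hunks above are this driver's part of the "Use new crypto_engine_op interface" conversion, which is why every skcipher field gains one extra level of .base nesting and each entry grows an .op.do_one_request initializer. As a minimal sketch of the wrapper types this relies on, paraphrased from include/crypto/engine.h as introduced by this series (not quoted verbatim):

	/* The engine callback now lives in the algorithm object itself... */
	struct crypto_engine_op {
		int (*do_one_request)(struct crypto_engine *engine, void *areq);
	};

	/* ...and each algorithm type gets an engine wrapper around its base alg. */
	struct skcipher_engine_alg {
		struct skcipher_alg base;
		struct crypto_engine_op op;
	};

The same pattern repeats for AEADs below with struct aead_engine_alg wrapping struct aead_alg.
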
-static struct aead_alg algs_aead[] = {
+static struct aead_engine_alg algs_aead[] = {
{
-		.base = {
+		.base.base = {
.cra_name = "gcm(aes)",
.cra_driver_name = "gcm-aes-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1467,17 +1450,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
-		.init = ocs_aes_aead_cra_init,
-		.exit = ocs_aead_cra_exit,
-		.ivsize = GCM_AES_IV_SIZE,
-		.maxauthsize = AES_BLOCK_SIZE,
-		.setauthsize = kmb_ocs_aead_gcm_setauthsize,
-		.setkey = kmb_ocs_aes_aead_set_key,
-		.encrypt = kmb_ocs_aes_gcm_encrypt,
-		.decrypt = kmb_ocs_aes_gcm_decrypt,
+		.base.init = ocs_aes_aead_cra_init,
+		.base.exit = ocs_aead_cra_exit,
+		.base.ivsize = GCM_AES_IV_SIZE,
+		.base.maxauthsize = AES_BLOCK_SIZE,
+		.base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+		.base.setkey = kmb_ocs_aes_aead_set_key,
+		.base.encrypt = kmb_ocs_aes_gcm_encrypt,
+		.base.decrypt = kmb_ocs_aes_gcm_decrypt,
+		.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
-		.base = {
+		.base.base = {
.cra_name = "ccm(aes)",
.cra_driver_name = "ccm-aes-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1489,17 +1473,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
-		.init = ocs_aes_aead_cra_init,
-		.exit = ocs_aead_cra_exit,
-		.ivsize = AES_BLOCK_SIZE,
-		.maxauthsize = AES_BLOCK_SIZE,
-		.setauthsize = kmb_ocs_aead_ccm_setauthsize,
-		.setkey = kmb_ocs_aes_aead_set_key,
-		.encrypt = kmb_ocs_aes_ccm_encrypt,
-		.decrypt = kmb_ocs_aes_ccm_decrypt,
+		.base.init = ocs_aes_aead_cra_init,
+		.base.exit = ocs_aead_cra_exit,
+		.base.ivsize = AES_BLOCK_SIZE,
+		.base.maxauthsize = AES_BLOCK_SIZE,
+		.base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+		.base.setkey = kmb_ocs_aes_aead_set_key,
+		.base.encrypt = kmb_ocs_aes_ccm_encrypt,
+		.base.decrypt = kmb_ocs_aes_ccm_decrypt,
+		.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
-		.base = {
+		.base.base = {
.cra_name = "gcm(sm4)",
.cra_driver_name = "gcm-sm4-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1510,17 +1495,18 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
-		.init = ocs_sm4_aead_cra_init,
-		.exit = ocs_aead_cra_exit,
-		.ivsize = GCM_AES_IV_SIZE,
-		.maxauthsize = AES_BLOCK_SIZE,
-		.setauthsize = kmb_ocs_aead_gcm_setauthsize,
-		.setkey = kmb_ocs_sm4_aead_set_key,
-		.encrypt = kmb_ocs_sm4_gcm_encrypt,
-		.decrypt = kmb_ocs_sm4_gcm_decrypt,
+		.base.init = ocs_sm4_aead_cra_init,
+		.base.exit = ocs_aead_cra_exit,
+		.base.ivsize = GCM_AES_IV_SIZE,
+		.base.maxauthsize = AES_BLOCK_SIZE,
+		.base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+		.base.setkey = kmb_ocs_sm4_aead_set_key,
+		.base.encrypt = kmb_ocs_sm4_gcm_encrypt,
+		.base.decrypt = kmb_ocs_sm4_gcm_decrypt,
+		.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
},
{
-		.base = {
+		.base.base = {
.cra_name = "ccm(sm4)",
.cra_driver_name = "ccm-sm4-keembay-ocs",
.cra_priority = KMB_OCS_PRIORITY,
@@ -1531,21 +1517,22 @@ static struct aead_alg algs_aead[] = {
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
},
-		.init = ocs_sm4_aead_cra_init,
-		.exit = ocs_aead_cra_exit,
-		.ivsize = AES_BLOCK_SIZE,
-		.maxauthsize = AES_BLOCK_SIZE,
-		.setauthsize = kmb_ocs_aead_ccm_setauthsize,
-		.setkey = kmb_ocs_sm4_aead_set_key,
-		.encrypt = kmb_ocs_sm4_ccm_encrypt,
-		.decrypt = kmb_ocs_sm4_ccm_decrypt,
+		.base.init = ocs_sm4_aead_cra_init,
+		.base.exit = ocs_aead_cra_exit,
+		.base.ivsize = AES_BLOCK_SIZE,
+		.base.maxauthsize = AES_BLOCK_SIZE,
+		.base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+		.base.setkey = kmb_ocs_sm4_aead_set_key,
+		.base.encrypt = kmb_ocs_sm4_ccm_encrypt,
+		.base.decrypt = kmb_ocs_sm4_ccm_decrypt,
+		.op.do_one_request = kmb_ocs_aes_aead_do_one_request,
}
};
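
Note: the .op.do_one_request hooks wired up above are what the crypto engine invokes per queued request. The bodies of kmb_ocs_aes_sk_do_one_request() and kmb_ocs_aes_aead_do_one_request() are outside this hunk; the following is only a hypothetical sketch of the shape such a handler takes (names and body are illustrative, not from this diff):

	/* Hypothetical handler sketch. The engine passes the async request as
	 * a void pointer; the driver recovers the typed request from it. */
	static int example_sk_do_one_request(struct crypto_engine *engine, void *areq)
	{
		struct skcipher_request *req =
			container_of(areq, struct skcipher_request, base);
		int err = 0;

		/* Program the hardware for this request here. On completion
		 * (possibly later, e.g. from an IRQ handler), hand the request
		 * back to the engine: */
		crypto_finalize_skcipher_request(engine, req, err);
		return 0;
	}

AEAD handlers follow the same pattern with struct aead_request and crypto_finalize_aead_request().
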
static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
{
-	crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
-	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+	crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+	crypto_engine_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
static int register_aes_algs(struct ocs_aes_dev *aes_dev)
@@ -1556,13 +1543,13 @@ static int register_aes_algs(struct ocs_aes_dev *aes_dev)
* If any algorithm fails to register, all preceding algorithms that
* were successfully registered will be automatically unregistered.
*/
-	ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+	ret = crypto_engine_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
if (ret)
return ret;
-	ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+	ret = crypto_engine_register_skciphers(algs, ARRAY_SIZE(algs));
if (ret)
-		crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs));
+		crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs));
return ret;
}
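
Note: the crypto_engine_register_*() helpers adopted here are thin wrappers. As a sketch of what registration does under the new interface, paraphrasing my reading of crypto/crypto_engine.c in this series (an assumption, not the verbatim source):

	/* Sketch: mark the alg as engine-backed so the engine can locate
	 * .op.do_one_request from the algorithm object, then register the
	 * embedded base algorithm as usual. */
	int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
	{
		alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
		return crypto_register_skcipher(&alg->base);
	}

This is what lets the callback live in the algorithm object rather than in each tfm context.
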
