From d00e708cef16442cabaf23f653baf924f5d66e83 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Thu, 6 Apr 2006 03:26:01 +1000 Subject: [PATCH 01/14] [CRYPTO] khazad: Use 32-bit reads on key On 64-bit platforms, reading the key 64 bits at a time results in unaligned access, since the key is only guaranteed to be 32-bit aligned. Signed-off-by: Atsushi Nemoto Signed-off-by: Herbert Xu --- crypto/khazad.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crypto/khazad.c b/crypto/khazad.c index 807f2bf4ea24..5b8dc9a2d374 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c @@ -758,7 +758,7 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) { struct khazad_ctx *ctx = ctx_arg; - const __be64 *key = (const __be64 *)in_key; + const __be32 *key = (const __be32 *)in_key; int r; const u64 *S = T7; u64 K2, K1; @@ -769,8 +769,9 @@ static int khazad_setkey(void *ctx_arg, const u8 *in_key, return -EINVAL; } - K2 = be64_to_cpu(key[0]); - K1 = be64_to_cpu(key[1]); + /* key is supposed to be 32-bit aligned */ + K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); + K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]); /* setup the encrypt key */ for (r = 0; r <= KHAZAD_ROUNDS; r++) { From e1147d8f47eb8fef93f98a30858192145137d2b2 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Mon, 10 Apr 2006 08:42:35 +1000 Subject: [PATCH 02/14] [CRYPTO] digest: Add alignment handling Some hash modules load/store data words directly. The digest layer should pass a properly aligned buffer to the update()/final() methods. This patch also adds cra_alignmask to some hash modules. Signed-off-by: Atsushi Nemoto Signed-off-by: Herbert Xu --- crypto/digest.c | 42 +++++++++++++++++++++++++++--------------- crypto/michael_mic.c | 1 + crypto/sha1.c | 1 + crypto/sha256.c | 1 + crypto/sha512.c | 2 ++ crypto/tgr192.c | 3 +++ 6 files changed, 35 insertions(+), 15 deletions(-) diff --git a/crypto/digest.c b/crypto/digest.c index d9b6ac9dbf8d..062d0a5a2c89 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -27,6 +27,7 @@ static void update(struct crypto_tfm *tfm, struct scatterlist *sg, unsigned int nsg) { unsigned int i; + unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); for (i = 0; i < nsg; i++) { @@ -38,12 +39,24 @@ static void update(struct crypto_tfm *tfm, unsigned int bytes_from_page = min(l, ((unsigned int) (PAGE_SIZE)) - offset); - char *p = crypto_kmap(pg, 0) + offset; + char *src = crypto_kmap(pg, 0); + char *p = src + offset; + if (unlikely(offset & alignmask)) { + unsigned int bytes = + alignmask + 1 - (offset & alignmask); + bytes = min(bytes, bytes_from_page); + tfm->__crt_alg->cra_digest.dia_update + (crypto_tfm_ctx(tfm), p, + bytes); + p += bytes; + bytes_from_page -= bytes; + l -= bytes; + } tfm->__crt_alg->cra_digest.dia_update (crypto_tfm_ctx(tfm), p, bytes_from_page); - crypto_kunmap(p, 0); + crypto_kunmap(src, 0); crypto_yield(tfm); offset = 0; pg++; @@ -54,7 +67,15 @@ static void update(struct crypto_tfm *tfm, static void final(struct crypto_tfm *tfm, u8 *out) { - tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out); + unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); + if (unlikely((unsigned long)out & alignmask)) { + unsigned int size = crypto_tfm_alg_digestsize(tfm); + u8 buffer[size + alignmask]; + u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), dst); + memcpy(out, dst, size); + } else + tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out); }
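The idea behind the new K2/K1 assignments in the khazad patch is simply to never dereference the key at a width larger than its guaranteed alignment. Below is a minimal standalone sketch of that load pattern in plain C rather than kernel code; load_be32() and load_be64_as_two_be32() are invented helper names, and the byte-wise load stands in for the kernel's be32_to_cpu() on an aligned __be32.

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for be32_to_cpu() on a big-endian 32-bit word;
 * a byte-wise load has no alignment requirement at all. */
uint32_t load_be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Mirrors: K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]); */
uint64_t load_be64_as_two_be32(const uint8_t *key)
{
        return ((uint64_t)load_be32(key) << 32) | load_be32(key + 4);
}

int main(void)
{
        /* One 64-bit key half, laid out big-endian in memory. */
        const uint8_t key[8] = { 0x01, 0x23, 0x45, 0x67,
                                 0x89, 0xab, 0xcd, 0xef };

        printf("K2 = 0x%016llx\n",
               (unsigned long long)load_be64_as_two_be32(key));
        return 0;
}

Either variant keeps every memory access within the 32-bit alignment the key buffer actually guarantees, which is all the khazad fix needs.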
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) @@ -69,18 +90,9 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) static void digest(struct crypto_tfm *tfm, struct scatterlist *sg, unsigned int nsg, u8 *out) { - unsigned int i; - - tfm->crt_digest.dit_init(tfm); - - for (i = 0; i < nsg; i++) { - char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset; - tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm), - p, sg[i].length); - crypto_kunmap(p, 0); - crypto_yield(tfm); - } - crypto_digest_final(tfm, out); + init(tfm); + update(tfm, sg, nsg); + final(tfm, out); } int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags) diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index 4f6ab23e14ad..701f859ed767 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -145,6 +145,7 @@ static struct crypto_alg michael_mic_alg = { .cra_blocksize = 8, .cra_ctxsize = sizeof(struct michael_mic_ctx), .cra_module = THIS_MODULE, + .cra_alignmask = 3, .cra_list = LIST_HEAD_INIT(michael_mic_alg.cra_list), .cra_u = { .digest = { .dia_digestsize = 8, diff --git a/crypto/sha1.c b/crypto/sha1.c index 21571ed35b7e..b96f57d95a82 100644 --- a/crypto/sha1.c +++ b/crypto/sha1.c @@ -112,6 +112,7 @@ static struct crypto_alg alg = { .cra_blocksize = SHA1_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha1_ctx), .cra_module = THIS_MODULE, + .cra_alignmask = 3, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .digest = { .dia_digestsize = SHA1_DIGEST_SIZE, diff --git a/crypto/sha256.c b/crypto/sha256.c index 9d5ef674d6a9..d62264a8a33e 100644 --- a/crypto/sha256.c +++ b/crypto/sha256.c @@ -313,6 +313,7 @@ static struct crypto_alg alg = { .cra_blocksize = SHA256_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha256_ctx), .cra_module = THIS_MODULE, + .cra_alignmask = 3, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .digest = { .dia_digestsize = SHA256_DIGEST_SIZE, diff --git a/crypto/sha512.c b/crypto/sha512.c index 3e6e9392310c..7dbec4f6b947 100644 --- a/crypto/sha512.c +++ b/crypto/sha512.c @@ -281,6 +281,7 @@ static struct crypto_alg sha512 = { .cra_blocksize = SHA512_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha512_ctx), .cra_module = THIS_MODULE, + .cra_alignmask = 3, .cra_list = LIST_HEAD_INIT(sha512.cra_list), .cra_u = { .digest = { .dia_digestsize = SHA512_DIGEST_SIZE, @@ -295,6 +296,7 @@ static struct crypto_alg sha384 = { .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA384_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sha512_ctx), + .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(sha384.cra_list), .cra_u = { .digest = { diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 2d8e44f6fbe9..1eae1bb7e495 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -627,6 +627,7 @@ static struct crypto_alg tgr192 = { .cra_blocksize = TGR192_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tgr192_ctx), .cra_module = THIS_MODULE, + .cra_alignmask = 7, .cra_list = LIST_HEAD_INIT(tgr192.cra_list), .cra_u = {.digest = { .dia_digestsize = TGR192_DIGEST_SIZE, @@ -641,6 +642,7 @@ static struct crypto_alg tgr160 = { .cra_blocksize = TGR192_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tgr192_ctx), .cra_module = THIS_MODULE, + .cra_alignmask = 7, .cra_list = LIST_HEAD_INIT(tgr160.cra_list), .cra_u = {.digest = { .dia_digestsize = TGR160_DIGEST_SIZE, @@ -655,6 +657,7 @@ static struct crypto_alg tgr128 = { .cra_blocksize = TGR192_BLOCK_SIZE, .cra_ctxsize = sizeof(struct tgr192_ctx), .cra_module = THIS_MODULE, + .cra_alignmask = 
7, .cra_list = LIST_HEAD_INIT(tgr128.cra_list), .cra_u = {.digest = { .dia_digestsize = TGR128_DIGEST_SIZE, From 8b55ba0303bb59c34fab8e015634018780491614 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 14 May 2006 11:00:39 +1000 Subject: [PATCH 03/14] [CRYPTO] aes-i586: Get rid of useless function wrappers The wrappers aes_encrypt/aes_decrypt simply reverse the order of the function arguments. It's just as easy to get the actual assembly code to read them in the opposite order. Signed-off-by: Herbert Xu --- arch/i386/crypto/aes-i586-asm.S | 9 ++++----- arch/i386/crypto/aes.c | 18 ++++-------------- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S index 911b15377f2e..2851f7fe51e6 100644 --- a/arch/i386/crypto/aes-i586-asm.S +++ b/arch/i386/crypto/aes-i586-asm.S @@ -36,16 +36,13 @@ .file "aes-i586-asm.S" .text -// aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])// -// aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])// - #define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words) // offsets to parameters with one register pushed onto stack -#define in_blk 8 // input byte array address parameter +#define in_blk 16 // input byte array address parameter #define out_blk 12 // output byte array address parameter -#define ctx 16 // AES context structure +#define ctx 8 // AES context structure // offsets in context structure @@ -220,6 +217,7 @@ do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */ // AES (Rijndael) Encryption Subroutine +/* void aes_enc_blk(void *ctx, u8 *out_blk, const u8 *in_blk) */ .global aes_enc_blk @@ -295,6 +293,7 @@ aes_enc_blk: ret // AES (Rijndael) Decryption Subroutine +/* void aes_dec_blk(void *ctx, u8 *out_blk, const u8 *in_blk) */ .global aes_dec_blk diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c index a50397b1d5c7..a0e033510a3b 100644 --- a/arch/i386/crypto/aes.c +++ b/arch/i386/crypto/aes.c @@ -45,8 +45,8 @@ #include #include -asmlinkage void aes_enc_blk(const u8 *src, u8 *dst, void *ctx); -asmlinkage void aes_dec_blk(const u8 *src, u8 *dst, void *ctx); +asmlinkage void aes_enc_blk(void *ctx, u8 *dst, const u8 *src); +asmlinkage void aes_dec_blk(void *ctx, u8 *dst, const u8 *src); #define AES_MIN_KEY_SIZE 16 #define AES_MAX_KEY_SIZE 32 @@ -464,16 +464,6 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) return 0; } -static inline void aes_encrypt(void *ctx, u8 *dst, const u8 *src) -{ - aes_enc_blk(src, dst, ctx); -} -static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src) -{ - aes_dec_blk(src, dst, ctx); -} - - static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-i586", @@ -488,8 +478,8 @@ static struct crypto_alg aes_alg = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, - .cia_encrypt = aes_encrypt, - .cia_decrypt = aes_decrypt + .cia_encrypt = aes_enc_blk, + .cia_decrypt = aes_dec_blk } } }; From 43600106e32809a4dead79fec67a63e9860e3d5d Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 16 May 2006 22:06:54 +1000 Subject: [PATCH 04/14] [CRYPTO] digest: Remove unnecessary zeroing during init Various digest algorithms operate one block at a time and therefore keep a temporary buffer of partial blocks. This buffer does not need to be initialised since there is a counter which indicates what is and isn't valid in it. 
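The argument in the last paragraph is easier to see with the usual partial-block bookkeeping written out. The sketch below is a generic pattern rather than any specific kernel hash (struct blk_ctx, BLOCK, blk_init(), blk_update() and compress are invented names): only bytes [0, count % BLOCK) of buf are ever read back, so an init routine only needs to reset the counter and the chaining state.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define BLOCK 64

struct blk_ctx {
        uint64_t count;       /* total bytes hashed so far               */
        uint8_t buf[BLOCK];   /* partial block; bytes [0, count % BLOCK) */
};

void blk_init(struct blk_ctx *ctx)
{
        /* The chaining state would be set to its IV here; buf is left as-is. */
        ctx->count = 0;
}

void blk_update(struct blk_ctx *ctx, const uint8_t *data, size_t len,
                void (*compress)(const uint8_t block[BLOCK]))
{
        size_t partial = (size_t)(ctx->count % BLOCK);

        ctx->count += len;

        if (partial) {
                size_t fill = BLOCK - partial;

                if (len < fill) {             /* still not a full block */
                        memcpy(ctx->buf + partial, data, len);
                        return;
                }
                memcpy(ctx->buf + partial, data, fill);
                compress(ctx->buf);
                data += fill;
                len -= fill;
        }
        for (; len >= BLOCK; data += BLOCK, len -= BLOCK)
                compress(data);

        if (len)                              /* stash the tail for next time */
                memcpy(ctx->buf, data, len);
}

A matching final() pads starting at count % BLOCK, so stale bytes past that point are never observed; that is why the memset() calls removed below are redundant.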
Signed-off-by: Herbert Xu --- arch/s390/crypto/sha1_s390.c | 23 ++++++++++++----------- arch/s390/crypto/sha256_s390.c | 1 - crypto/sha256.c | 1 - crypto/sha512.c | 2 -- crypto/tgr192.c | 1 - 5 files changed, 12 insertions(+), 16 deletions(-) diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 98c896b86dcd..36bb5346a8c4 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -40,19 +40,20 @@ struct crypt_s390_sha1_ctx { u8 buffer[2 * SHA1_BLOCK_SIZE]; }; -static void -sha1_init(void *ctx) +static void sha1_init(void *ctx_arg) { - static const struct crypt_s390_sha1_ctx initstate = { - .state = { - 0x67452301, - 0xEFCDAB89, - 0x98BADCFE, - 0x10325476, - 0xC3D2E1F0 - }, + struct crypt_s390_sha1_ctx *ctx = ctx_arg; + static const u32 initstate[5] = { + 0x67452301, + 0xEFCDAB89, + 0x98BADCFE, + 0x10325476, + 0xC3D2E1F0 }; - memcpy(ctx, &initstate, sizeof(initstate)); + + ctx->count = 0; + memcpy(ctx->state, &initstate, sizeof(initstate)); + ctx->buf_len = 0; } static void diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 1ec5e92b3454..2c76e7bee41c 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -44,7 +44,6 @@ static void sha256_init(void *ctx) sctx->state[6] = 0x1f83d9ab; sctx->state[7] = 0x5be0cd19; sctx->count = 0; - memset(sctx->buf, 0, sizeof(sctx->buf)); } static void sha256_update(void *ctx, const u8 *data, unsigned int len) diff --git a/crypto/sha256.c b/crypto/sha256.c index d62264a8a33e..4533a0564895 100644 --- a/crypto/sha256.c +++ b/crypto/sha256.c @@ -242,7 +242,6 @@ static void sha256_init(void *ctx) sctx->state[6] = H6; sctx->state[7] = H7; sctx->count[0] = sctx->count[1] = 0; - memset(sctx->buf, 0, sizeof(sctx->buf)); } static void sha256_update(void *ctx, const u8 *data, unsigned int len) diff --git a/crypto/sha512.c b/crypto/sha512.c index 7dbec4f6b947..bc77a66d9de2 100644 --- a/crypto/sha512.c +++ b/crypto/sha512.c @@ -173,7 +173,6 @@ sha512_init(void *ctx) sctx->state[6] = H6; sctx->state[7] = H7; sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; - memset(sctx->buf, 0, sizeof(sctx->buf)); } static void @@ -189,7 +188,6 @@ sha384_init(void *ctx) sctx->state[6] = HP6; sctx->state[7] = HP7; sctx->count[0] = sctx->count[1] = sctx->count[2] = sctx->count[3] = 0; - memset(sctx->buf, 0, sizeof(sctx->buf)); } static void diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 1eae1bb7e495..004bb841cc5b 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -500,7 +500,6 @@ static void tgr192_init(void *ctx) { struct tgr192_ctx *tctx = ctx; - memset (tctx->hash, 0, 64); tctx->a = 0x0123456789abcdefULL; tctx->b = 0xfedcba9876543210ULL; tctx->c = 0xf096a5b4c3b2e187ULL; From 6c2bb98bc33ae33c7a33a133a4cd5a06395fece5 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 16 May 2006 22:09:29 +1000 Subject: [PATCH 05/14] [CRYPTO] all: Pass tfm instead of ctx to algorithms Up until now algorithms have been happy to get a context pointer since they know everything that's in the tfm already (e.g., alignment, block size). However, once we have parameterised algorithms, such information will be specific to each tfm. So the algorithm API needs to be changed to pass the tfm structure instead of the context pointer. This patch is basically a text substitution. The only tricky bit is the assembly routines that need to get the context pointer offset through asm-offsets.h. 
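For readers who have not met asm-offsets.h before, the mechanism is roughly the following. The structure in this sketch is a simplified stand-in for the real struct crypto_tfm, and the program is a userspace analogue of what arch/*/kernel/asm-offsets.c does at build time (the kernel emits the constant through special asm markers rather than printf).

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct crypto_tfm; only the layout idea matters. */
struct crypto_tfm {
        unsigned int crt_flags;
        /* ... crt_u, __crt_alg and friends elided ... */
        unsigned char __crt_ctx[];    /* per-tfm context sits at the end */
};

/* What C callers use: the context is just "the end of the tfm". */
void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
        return tfm->__crt_ctx;
}

int main(void)
{
        /*
         * asm-offsets.c evaluates this offset at build time and publishes
         * it as crypto_tfm_ctx_offset in asm-offsets.h, so the assembly
         * can do the same pointer arithmetic with a plain constant.
         */
        printf("#define crypto_tfm_ctx_offset %zu\n",
               offsetof(struct crypto_tfm, __crt_ctx));
        return 0;
}

With crypto_tfm_ctx_offset available as a plain assembler constant, aes-i586-asm.S and aes-x86_64-asm.S can take the tfm pointer directly and address the key schedule relative to it, which is what lets the patch drop the void *ctx convention.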
Signed-off-by: Herbert Xu --- arch/i386/crypto/aes-i586-asm.S | 28 +++++++-------- arch/i386/crypto/aes.c | 10 +++--- arch/i386/kernel/asm-offsets.c | 3 ++ arch/s390/crypto/aes_s390.c | 14 ++++---- arch/s390/crypto/des_s390.c | 42 +++++++++++----------- arch/s390/crypto/sha1_s390.c | 15 ++++---- arch/s390/crypto/sha256_s390.c | 13 +++---- arch/x86_64/crypto/aes-x86_64-asm.S | 18 ++++++---- arch/x86_64/crypto/aes.c | 10 +++--- arch/x86_64/kernel/asm-offsets.c | 3 ++ crypto/aes.c | 14 ++++---- crypto/anubis.c | 13 ++++--- crypto/arc4.c | 9 ++--- crypto/blowfish.c | 19 +++++----- crypto/cast5.c | 14 ++++---- crypto/cast6.c | 15 ++++---- crypto/cipher.c | 14 ++++---- crypto/compress.c | 6 ++-- crypto/crc32c.c | 19 +++++----- crypto/crypto_null.c | 17 ++++----- crypto/deflate.c | 19 +++++----- crypto/des.c | 27 +++++++------- crypto/digest.c | 15 ++++---- crypto/khazad.c | 14 ++++---- crypto/md4.c | 12 +++---- crypto/md5.c | 12 +++---- crypto/michael_mic.c | 19 +++++----- crypto/serpent.c | 27 ++++++++------ crypto/sha1.c | 17 ++++----- crypto/sha256.c | 17 ++++----- crypto/sha512.c | 25 +++++++------ crypto/tea.c | 55 ++++++++++++++--------------- crypto/tgr192.c | 29 ++++++++------- crypto/twofish.c | 14 ++++---- crypto/wp512.c | 24 ++++++------- drivers/crypto/padlock-aes.c | 29 ++++++++------- include/linux/crypto.h | 29 +++++++-------- 37 files changed, 349 insertions(+), 331 deletions(-) diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S index 2851f7fe51e6..f942f0c8f630 100644 --- a/arch/i386/crypto/aes-i586-asm.S +++ b/arch/i386/crypto/aes-i586-asm.S @@ -36,19 +36,19 @@ .file "aes-i586-asm.S" .text +#include + #define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words) -// offsets to parameters with one register pushed onto stack +/* offsets to parameters with one register pushed onto stack */ +#define tfm 8 +#define out_blk 12 +#define in_blk 16 -#define in_blk 16 // input byte array address parameter -#define out_blk 12 // output byte array address parameter -#define ctx 8 // AES context structure - -// offsets in context structure - -#define ekey 0 // encryption key schedule base address -#define nrnd 256 // number of rounds -#define dkey 260 // decryption key schedule base address +/* offsets in crypto_tfm structure */ +#define ekey (crypto_tfm_ctx_offset + 0) +#define nrnd (crypto_tfm_ctx_offset + 256) +#define dkey (crypto_tfm_ctx_offset + 260) // register mapping for encrypt and decrypt subroutines @@ -217,7 +217,7 @@ do_col (table, r5,r0,r1,r4, r2,r3); /* idx=r5 */ // AES (Rijndael) Encryption Subroutine -/* void aes_enc_blk(void *ctx, u8 *out_blk, const u8 *in_blk) */ +/* void aes_enc_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */ .global aes_enc_blk @@ -228,7 +228,7 @@ aes_enc_blk: push %ebp - mov ctx(%esp),%ebp // pointer to context + mov tfm(%esp),%ebp // CAUTION: the order and the values used in these assigns // rely on the register mappings @@ -293,7 +293,7 @@ aes_enc_blk: ret // AES (Rijndael) Decryption Subroutine -/* void aes_dec_blk(void *ctx, u8 *out_blk, const u8 *in_blk) */ +/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out_blk, const u8 *in_blk) */ .global aes_dec_blk @@ -304,7 +304,7 @@ aes_enc_blk: aes_dec_blk: push %ebp - mov ctx(%esp),%ebp // pointer to context + mov tfm(%esp),%ebp // CAUTION: the order and the values used in these assigns // rely on the register mappings diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c index a0e033510a3b..b9c7d99160f1 100644 --- a/arch/i386/crypto/aes.c +++ 
b/arch/i386/crypto/aes.c @@ -45,8 +45,8 @@ #include #include -asmlinkage void aes_enc_blk(void *ctx, u8 *dst, const u8 *src); -asmlinkage void aes_dec_blk(void *ctx, u8 *dst, const u8 *src); +asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); +asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src); #define AES_MIN_KEY_SIZE 16 #define AES_MAX_KEY_SIZE 32 @@ -378,12 +378,12 @@ static void gen_tabs(void) k[8*(i)+11] = ss[3]; \ } -static int -aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) { int i; u32 ss[8]; - struct aes_ctx *ctx = ctx_arg; + struct aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; /* encryption schedule */ diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c index 36d66e2077d0..1c3a809e6421 100644 --- a/arch/i386/kernel/asm-offsets.c +++ b/arch/i386/kernel/asm-offsets.c @@ -4,6 +4,7 @@ * to extract and format the required data. */ +#include #include #include #include @@ -69,4 +70,6 @@ void foo(void) DEFINE(PAGE_SIZE_asm, PAGE_SIZE); DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL)); + + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); } diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index c5ca2dc5d428..5713c7e5bd16 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -37,10 +37,10 @@ struct s390_aes_ctx { int key_len; }; -static int aes_set_key(void *ctx, const u8 *in_key, unsigned int key_len, - u32 *flags) +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) { - struct s390_aes_ctx *sctx = ctx; + struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); switch (key_len) { case 16: @@ -70,9 +70,9 @@ fail: return -EINVAL; } -static void aes_encrypt(void *ctx, u8 *out, const u8 *in) +static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct s390_aes_ctx *sctx = ctx; + const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); switch (sctx->key_len) { case 16: @@ -90,9 +90,9 @@ static void aes_encrypt(void *ctx, u8 *out, const u8 *in) } } -static void aes_decrypt(void *ctx, u8 *out, const u8 *in) +static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct s390_aes_ctx *sctx = ctx; + const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm); switch (sctx->key_len) { case 16: diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index e3c37aa0a199..b3f7496a79b4 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -44,10 +44,10 @@ struct crypt_s390_des3_192_ctx { u8 key[DES3_192_KEY_SIZE]; }; -static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, - u32 *flags) +static int des_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { - struct crypt_s390_des_ctx *dctx = ctx; + struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm); int ret; /* test if key is valid (not a weak key) */ @@ -57,16 +57,16 @@ static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, return ret; } -static void des_encrypt(void *ctx, u8 *out, const u8 *in) +static void des_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct crypt_s390_des_ctx *dctx = ctx; + struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_DEA_ENCRYPT, dctx->key, out, in, DES_BLOCK_SIZE); } -static void des_decrypt(void *ctx, u8 *out, const 
u8 *in) +static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct crypt_s390_des_ctx *dctx = ctx; + struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE); } @@ -166,11 +166,11 @@ static struct crypto_alg des_alg = { * Implementers MUST reject keys that exhibit this property. * */ -static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, - u32 *flags) +static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { int i, ret; - struct crypt_s390_des3_128_ctx *dctx = ctx; + struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm); const u8* temp_key = key; if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) { @@ -186,17 +186,17 @@ static int des3_128_setkey(void *ctx, const u8 *key, unsigned int keylen, return 0; } -static void des3_128_encrypt(void *ctx, u8 *dst, const u8 *src) +static void des3_128_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct crypt_s390_des3_128_ctx *dctx = ctx; + struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_TDEA_128_ENCRYPT, dctx->key, dst, (void*)src, DES3_128_BLOCK_SIZE); } -static void des3_128_decrypt(void *ctx, u8 *dst, const u8 *src) +static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct crypt_s390_des3_128_ctx *dctx = ctx; + struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_TDEA_128_DECRYPT, dctx->key, dst, (void*)src, DES3_128_BLOCK_SIZE); @@ -302,11 +302,11 @@ static struct crypto_alg des3_128_alg = { * property. * */ -static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, - u32 *flags) +static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { int i, ret; - struct crypt_s390_des3_192_ctx *dctx = ctx; + struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm); const u8* temp_key = key; if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && @@ -325,17 +325,17 @@ static int des3_192_setkey(void *ctx, const u8 *key, unsigned int keylen, return 0; } -static void des3_192_encrypt(void *ctx, u8 *dst, const u8 *src) +static void des3_192_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct crypt_s390_des3_192_ctx *dctx = ctx; + struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_TDEA_192_ENCRYPT, dctx->key, dst, (void*)src, DES3_192_BLOCK_SIZE); } -static void des3_192_decrypt(void *ctx, u8 *dst, const u8 *src) +static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct crypt_s390_des3_192_ctx *dctx = ctx; + struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm); crypt_s390_km(KM_TDEA_192_DECRYPT, dctx->key, dst, (void*)src, DES3_192_BLOCK_SIZE); diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 36bb5346a8c4..9d34a35b1aa5 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c @@ -40,9 +40,9 @@ struct crypt_s390_sha1_ctx { u8 buffer[2 * SHA1_BLOCK_SIZE]; }; -static void sha1_init(void *ctx_arg) +static void sha1_init(struct crypto_tfm *tfm) { - struct crypt_s390_sha1_ctx *ctx = ctx_arg; + struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); static const u32 initstate[5] = { 0x67452301, 0xEFCDAB89, @@ -56,13 +56,13 @@ static void sha1_init(void *ctx_arg) ctx->buf_len = 0; } -static void -sha1_update(void *ctx, const u8 *data, unsigned int len) +static void sha1_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int 
len) { struct crypt_s390_sha1_ctx *sctx; long imd_len; - sctx = ctx; + sctx = crypto_tfm_ctx(tfm); sctx->count += len * 8; //message bit length //anything in buffer yet? -> must be completed @@ -111,10 +111,9 @@ pad_message(struct crypt_s390_sha1_ctx* sctx) } /* Add padding and return the message digest. */ -static void -sha1_final(void* ctx, u8 *out) +static void sha1_final(struct crypto_tfm *tfm, u8 *out) { - struct crypt_s390_sha1_ctx *sctx = ctx; + struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); //must perform manual padding pad_message(sctx); diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 2c76e7bee41c..f573df30f31d 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c @@ -31,9 +31,9 @@ struct s390_sha256_ctx { u8 buf[2 * SHA256_BLOCK_SIZE]; }; -static void sha256_init(void *ctx) +static void sha256_init(struct crypto_tfm *tfm) { - struct s390_sha256_ctx *sctx = ctx; + struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); sctx->state[0] = 0x6a09e667; sctx->state[1] = 0xbb67ae85; @@ -46,9 +46,10 @@ static void sha256_init(void *ctx) sctx->count = 0; } -static void sha256_update(void *ctx, const u8 *data, unsigned int len) +static void sha256_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) { - struct s390_sha256_ctx *sctx = ctx; + struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); unsigned int index; int ret; @@ -107,9 +108,9 @@ static void pad_message(struct s390_sha256_ctx* sctx) } /* Add padding and return the message digest */ -static void sha256_final(void* ctx, u8 *out) +static void sha256_final(struct crypto_tfm *tfm, u8 *out) { - struct s390_sha256_ctx *sctx = ctx; + struct s390_sha256_ctx *sctx = crypto_tfm_ctx(tfm); /* must perform manual padding */ pad_message(sctx); diff --git a/arch/x86_64/crypto/aes-x86_64-asm.S b/arch/x86_64/crypto/aes-x86_64-asm.S index 483cbb23ab8d..f3ba643e144d 100644 --- a/arch/x86_64/crypto/aes-x86_64-asm.S +++ b/arch/x86_64/crypto/aes-x86_64-asm.S @@ -15,6 +15,10 @@ .text +#include + +#define BASE crypto_tfm_ctx_offset + #define R1 %rax #define R1E %eax #define R1X %ax @@ -46,19 +50,19 @@ #define R10 %r10 #define R11 %r11 -#define prologue(FUNC,BASE,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \ +#define prologue(FUNC,KEY,B128,B192,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11) \ .global FUNC; \ .type FUNC,@function; \ .align 8; \ FUNC: movq r1,r2; \ movq r3,r4; \ - leaq BASE+52(r8),r9; \ + leaq BASE+KEY+52(r8),r9; \ movq r10,r11; \ movl (r7),r5 ## E; \ movl 4(r7),r1 ## E; \ movl 8(r7),r6 ## E; \ movl 12(r7),r7 ## E; \ - movl (r8),r10 ## E; \ + movl BASE(r8),r10 ## E; \ xorl -48(r9),r5 ## E; \ xorl -44(r9),r1 ## E; \ xorl -40(r9),r6 ## E; \ @@ -128,8 +132,8 @@ FUNC: movq r1,r2; \ movl r3 ## E,r1 ## E; \ movl r4 ## E,r2 ## E; -#define entry(FUNC,BASE,B128,B192) \ - prologue(FUNC,BASE,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11) +#define entry(FUNC,KEY,B128,B192) \ + prologue(FUNC,KEY,B128,B192,R2,R8,R7,R9,R1,R3,R4,R6,R10,R5,R11) #define return epilogue(R8,R2,R9,R7,R5,R6,R3,R4,R11) @@ -147,7 +151,7 @@ FUNC: movq r1,r2; \ #define decrypt_final(TAB,OFFSET) \ round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4) -/* void aes_encrypt(void *ctx, u8 *out, const u8 *in) */ +/* void aes_encrypt(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */ entry(aes_encrypt,0,enc128,enc192) encrypt_round(aes_ft_tab,-96) @@ -166,7 +170,7 @@ enc128: encrypt_round(aes_ft_tab,-32) encrypt_final(aes_fl_tab,112) return -/* void aes_decrypt(void *ctx, u8 *out, const u8 *in) */ +/* void 
aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ entry(aes_decrypt,240,dec128,dec192) decrypt_round(aes_it_tab,-96) diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c index 6f77e7700d32..d6f8e0463b5d 100644 --- a/arch/x86_64/crypto/aes.c +++ b/arch/x86_64/crypto/aes.c @@ -227,10 +227,10 @@ static void __init gen_tabs(void) t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ } -static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, - u32 *flags) +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) { - struct aes_ctx *ctx = ctx_arg; + struct aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; u32 i, j, t, u, v, w; @@ -283,8 +283,8 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, return 0; } -extern void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in); -extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in); +extern void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); +extern void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); static struct crypto_alg aes_alg = { .cra_name = "aes", diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c index 38834bbbae11..96687e2beb2c 100644 --- a/arch/x86_64/kernel/asm-offsets.c +++ b/arch/x86_64/kernel/asm-offsets.c @@ -4,6 +4,7 @@ * and format the required data. */ +#include #include #include #include @@ -68,5 +69,7 @@ int main(void) DEFINE(pbe_next, offsetof(struct pbe, next)); BLANK(); DEFINE(TSS_ist, offsetof(struct tss_struct, ist)); + BLANK(); + DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); return 0; } diff --git a/crypto/aes.c b/crypto/aes.c index a5017292e066..a038711831e7 100644 --- a/crypto/aes.c +++ b/crypto/aes.c @@ -248,10 +248,10 @@ gen_tabs (void) t ^= E_KEY[8 * i + 7]; E_KEY[8 * i + 15] = t; \ } -static int -aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) { - struct aes_ctx *ctx = ctx_arg; + struct aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; u32 i, t, u, v, w; @@ -318,9 +318,9 @@ aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) f_rl(bo, bi, 2, k); \ f_rl(bo, bi, 3, k) -static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) +static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct aes_ctx *ctx = ctx_arg; + const struct aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *src = (const __le32 *)in; __le32 *dst = (__le32 *)out; u32 b0[4], b1[4]; @@ -373,9 +373,9 @@ static void aes_encrypt(void *ctx_arg, u8 *out, const u8 *in) i_rl(bo, bi, 2, k); \ i_rl(bo, bi, 3, k) -static void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in) +static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - const struct aes_ctx *ctx = ctx_arg; + const struct aes_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *src = (const __le32 *)in; __le32 *dst = (__le32 *)out; u32 b0[4], b1[4]; diff --git a/crypto/anubis.c b/crypto/anubis.c index 2c796bdb91a6..7e2e1a29800e 100644 --- a/crypto/anubis.c +++ b/crypto/anubis.c @@ -460,16 +460,15 @@ static const u32 rc[] = { 0xf726ffedU, 0xe89d6f8eU, 0x19a0f089U, }; -static int anubis_setkey(void *ctx_arg, const u8 *in_key, +static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len, u32 *flags) { + struct anubis_ctx *ctx = 
crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; int N, R, i, r; u32 kappa[ANUBIS_MAX_N]; u32 inter[ANUBIS_MAX_N]; - struct anubis_ctx *ctx = ctx_arg; - switch (key_len) { case 16: case 20: case 24: case 28: @@ -660,15 +659,15 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4], dst[i] = cpu_to_be32(inter[i]); } -static void anubis_encrypt(void *ctx_arg, u8 *dst, const u8 *src) +static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct anubis_ctx *ctx = ctx_arg; + struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); anubis_crypt(ctx->E, dst, src, ctx->R); } -static void anubis_decrypt(void *ctx_arg, u8 *dst, const u8 *src) +static void anubis_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct anubis_ctx *ctx = ctx_arg; + struct anubis_ctx *ctx = crypto_tfm_ctx(tfm); anubis_crypt(ctx->D, dst, src, ctx->R); } diff --git a/crypto/arc4.c b/crypto/arc4.c index 9efbcaae88a1..5edc6a65b987 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -24,9 +24,10 @@ struct arc4_ctx { u8 x, y; }; -static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) +static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) { - struct arc4_ctx *ctx = ctx_arg; + struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); int i, j = 0, k = 0; ctx->x = 1; @@ -48,9 +49,9 @@ static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u return 0; } -static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in) +static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct arc4_ctx *ctx = ctx_arg; + struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); u8 *const S = ctx->S; u8 x = ctx->x; diff --git a/crypto/blowfish.c b/crypto/blowfish.c index 7f710b201f20..490265f42b3b 100644 --- a/crypto/blowfish.c +++ b/crypto/blowfish.c @@ -349,7 +349,7 @@ static void encrypt_block(struct bf_ctx *bctx, u32 *dst, u32 *src) dst[1] = yl; } -static void bf_encrypt(void *ctx, u8 *dst, const u8 *src) +static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { const __be32 *in_blk = (const __be32 *)src; __be32 *const out_blk = (__be32 *)dst; @@ -357,17 +357,18 @@ static void bf_encrypt(void *ctx, u8 *dst, const u8 *src) in32[0] = be32_to_cpu(in_blk[0]); in32[1] = be32_to_cpu(in_blk[1]); - encrypt_block(ctx, out32, in32); + encrypt_block(crypto_tfm_ctx(tfm), out32, in32); out_blk[0] = cpu_to_be32(out32[0]); out_blk[1] = cpu_to_be32(out32[1]); } -static void bf_decrypt(void *ctx, u8 *dst, const u8 *src) +static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { + struct bf_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *in_blk = (const __be32 *)src; __be32 *const out_blk = (__be32 *)dst; - const u32 *P = ((struct bf_ctx *)ctx)->p; - const u32 *S = ((struct bf_ctx *)ctx)->s; + const u32 *P = ctx->p; + const u32 *S = ctx->s; u32 yl = be32_to_cpu(in_blk[0]); u32 yr = be32_to_cpu(in_blk[1]); @@ -398,12 +399,14 @@ static void bf_decrypt(void *ctx, u8 *dst, const u8 *src) /* * Calculates the blowfish S and P boxes for encryption and decryption. 
*/ -static int bf_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) +static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { + struct bf_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *P = ctx->p; + u32 *S = ctx->s; short i, j, count; u32 data[2], temp; - u32 *P = ((struct bf_ctx *)ctx)->p; - u32 *S = ((struct bf_ctx *)ctx)->s; /* Copy the initialization s-boxes */ for (i = 0, count = 0; i < 256; i++) diff --git a/crypto/cast5.c b/crypto/cast5.c index 8834c8580c04..08eef58c1d3d 100644 --- a/crypto/cast5.c +++ b/crypto/cast5.c @@ -577,9 +577,9 @@ static const u32 sb8[256] = { (((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]) ) -static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) +static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { - struct cast5_ctx *c = (struct cast5_ctx *) ctx; + struct cast5_ctx *c = crypto_tfm_ctx(tfm); const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 l, r, t; @@ -642,9 +642,9 @@ static void cast5_encrypt(void *ctx, u8 * outbuf, const u8 * inbuf) dst[1] = cpu_to_be32(l); } -static void cast5_decrypt(void *ctx, u8 * outbuf, const u8 * inbuf) +static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { - struct cast5_ctx *c = (struct cast5_ctx *) ctx; + struct cast5_ctx *c = crypto_tfm_ctx(tfm); const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 l, r, t; @@ -769,15 +769,15 @@ static void key_schedule(u32 * x, u32 * z, u32 * k) } -static int -cast5_setkey(void *ctx, const u8 * key, unsigned key_len, u32 * flags) +static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned key_len, u32 *flags) { + struct cast5_ctx *c = crypto_tfm_ctx(tfm); int i; u32 x[4]; u32 z[4]; u32 k[16]; __be32 p_key[4]; - struct cast5_ctx *c = (struct cast5_ctx *) ctx; if (key_len < 5 || key_len > 16) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; diff --git a/crypto/cast6.c b/crypto/cast6.c index 9e28740ba775..08e33bfc3ad1 100644 --- a/crypto/cast6.c +++ b/crypto/cast6.c @@ -381,13 +381,13 @@ static inline void W(u32 *key, unsigned int i) { key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]); } -static int -cast6_setkey(void *ctx, const u8 * in_key, unsigned key_len, u32 * flags) +static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key, + unsigned key_len, u32 *flags) { int i; u32 key[8]; __be32 p_key[8]; /* padded key */ - struct cast6_ctx *c = (struct cast6_ctx *) ctx; + struct cast6_ctx *c = crypto_tfm_ctx(tfm); if (key_len < 16 || key_len > 32 || key_len % 4 != 0) { *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; @@ -444,8 +444,9 @@ static inline void QBAR (u32 * block, u8 * Kr, u32 * Km) { block[2] ^= F1(block[3], Kr[0], Km[0]); } -static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { - struct cast6_ctx * c = (struct cast6_ctx *)ctx; +static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) +{ + struct cast6_ctx *c = crypto_tfm_ctx(tfm); const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; u32 block[4]; @@ -476,8 +477,8 @@ static void cast6_encrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { dst[3] = cpu_to_be32(block[3]); } -static void cast6_decrypt (void * ctx, u8 * outbuf, const u8 * inbuf) { - struct cast6_ctx * c = (struct cast6_ctx *)ctx; +static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf) { + struct cast6_ctx * c = crypto_tfm_ctx(tfm); const __be32 *src = (const __be32 *)inbuf; __be32 *dst = (__be32 *)outbuf; 
u32 block[4]; diff --git a/crypto/cipher.c b/crypto/cipher.c index 65bcea0cd17c..b899eb97abd7 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -187,7 +187,7 @@ static unsigned int cbc_process_encrypt(const struct cipher_desc *desc, void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; int bsize = crypto_tfm_alg_blocksize(tfm); - void (*fn)(void *, u8 *, const u8 *) = desc->crfn; + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; u8 *iv = desc->info; unsigned int done = 0; @@ -195,7 +195,7 @@ static unsigned int cbc_process_encrypt(const struct cipher_desc *desc, do { xor(iv, src); - fn(crypto_tfm_ctx(tfm), dst, iv); + fn(tfm, dst, iv); memcpy(iv, dst, bsize); src += bsize; @@ -218,7 +218,7 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc, u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); u8 **dst_p = src == dst ? &buf : &dst; - void (*fn)(void *, u8 *, const u8 *) = desc->crfn; + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; u8 *iv = desc->info; unsigned int done = 0; @@ -227,7 +227,7 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc, do { u8 *tmp_dst = *dst_p; - fn(crypto_tfm_ctx(tfm), tmp_dst, src); + fn(tfm, tmp_dst, src); xor(tmp_dst, iv); memcpy(iv, src, bsize); if (tmp_dst != dst) @@ -245,13 +245,13 @@ static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst, { struct crypto_tfm *tfm = desc->tfm; int bsize = crypto_tfm_alg_blocksize(tfm); - void (*fn)(void *, u8 *, const u8 *) = desc->crfn; + void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; unsigned int done = 0; nbytes -= bsize; do { - fn(crypto_tfm_ctx(tfm), dst, src); + fn(tfm, dst, src); src += bsize; dst += bsize; @@ -268,7 +268,7 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; return -EINVAL; } else - return cia->cia_setkey(crypto_tfm_ctx(tfm), key, keylen, + return cia->cia_setkey(tfm, key, keylen, &tfm->crt_flags); } diff --git a/crypto/compress.c b/crypto/compress.c index eb36d9364da3..c12fc0c41dac 100644 --- a/crypto/compress.c +++ b/crypto/compress.c @@ -22,8 +22,7 @@ static int crypto_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { - return tfm->__crt_alg->cra_compress.coa_compress(crypto_tfm_ctx(tfm), - src, slen, dst, + return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst, dlen); } @@ -31,8 +30,7 @@ static int crypto_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { - return tfm->__crt_alg->cra_compress.coa_decompress(crypto_tfm_ctx(tfm), - src, slen, dst, + return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst, dlen); } diff --git a/crypto/crc32c.c b/crypto/crc32c.c index 953362423a5c..f2660123aeb4 100644 --- a/crypto/crc32c.c +++ b/crypto/crc32c.c @@ -31,9 +31,9 @@ struct chksum_ctx { * crc using table. */ -static void chksum_init(void *ctx) +static void chksum_init(struct crypto_tfm *tfm) { - struct chksum_ctx *mctx = ctx; + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); mctx->crc = ~(u32)0; /* common usage */ } @@ -43,10 +43,10 @@ static void chksum_init(void *ctx) * If your algorithm starts with ~0, then XOR with ~0 before you set * the seed. 
*/ -static int chksum_setkey(void *ctx, const u8 *key, unsigned int keylen, - u32 *flags) +static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { - struct chksum_ctx *mctx = ctx; + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); if (keylen != sizeof(mctx->crc)) { if (flags) @@ -57,9 +57,10 @@ static int chksum_setkey(void *ctx, const u8 *key, unsigned int keylen, return 0; } -static void chksum_update(void *ctx, const u8 *data, unsigned int length) +static void chksum_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int length) { - struct chksum_ctx *mctx = ctx; + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); u32 mcrc; mcrc = crc32c(mctx->crc, data, (size_t)length); @@ -67,9 +68,9 @@ static void chksum_update(void *ctx, const u8 *data, unsigned int length) mctx->crc = mcrc; } -static void chksum_final(void *ctx, u8 *out) +static void chksum_final(struct crypto_tfm *tfm, u8 *out) { - struct chksum_ctx *mctx = ctx; + struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); u32 mcrc = (mctx->crc ^ ~(u32)0); *(u32 *)out = __le32_to_cpu(mcrc); diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 3fcf6e887e87..a0d956b52949 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -27,8 +27,8 @@ #define NULL_BLOCK_SIZE 1 #define NULL_DIGEST_SIZE 0 -static int null_compress(void *ctx, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) +static int null_compress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) { if (slen > *dlen) return -EINVAL; @@ -37,20 +37,21 @@ static int null_compress(void *ctx, const u8 *src, unsigned int slen, return 0; } -static void null_init(void *ctx) +static void null_init(struct crypto_tfm *tfm) { } -static void null_update(void *ctx, const u8 *data, unsigned int len) +static void null_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) { } -static void null_final(void *ctx, u8 *out) +static void null_final(struct crypto_tfm *tfm, u8 *out) { } -static int null_setkey(void *ctx, const u8 *key, - unsigned int keylen, u32 *flags) +static int null_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { return 0; } -static void null_crypt(void *ctx, u8 *dst, const u8 *src) +static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { memcpy(dst, src, NULL_BLOCK_SIZE); } diff --git a/crypto/deflate.c b/crypto/deflate.c index f209368d62ae..5dd2404ae5b2 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -102,8 +102,9 @@ static void deflate_decomp_exit(struct deflate_ctx *ctx) kfree(ctx->decomp_stream.workspace); } -static int deflate_init(void *ctx) +static int deflate_init(struct crypto_tfm *tfm) { + struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); int ret; ret = deflate_comp_init(ctx); @@ -116,17 +117,19 @@ out: return ret; } -static void deflate_exit(void *ctx) +static void deflate_exit(struct crypto_tfm *tfm) { + struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); + deflate_comp_exit(ctx); deflate_decomp_exit(ctx); } -static int deflate_compress(void *ctx, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) +static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) { int ret = 0; - struct deflate_ctx *dctx = ctx; + struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); struct z_stream_s *stream = &dctx->comp_stream; ret = zlib_deflateReset(stream); @@ -151,12 +154,12 @@ out: return ret; } -static int deflate_decompress(void *ctx, const u8 *src, 
unsigned int slen, - u8 *dst, unsigned int *dlen) +static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) { int ret = 0; - struct deflate_ctx *dctx = ctx; + struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); struct z_stream_s *stream = &dctx->decomp_stream; ret = zlib_inflateReset(stream); diff --git a/crypto/des.c b/crypto/des.c index 2d74cab40c3e..a9d3c235a6af 100644 --- a/crypto/des.c +++ b/crypto/des.c @@ -783,9 +783,10 @@ static void dkey(u32 *pe, const u8 *k) } } -static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) +static int des_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { - struct des_ctx *dctx = ctx; + struct des_ctx *dctx = crypto_tfm_ctx(tfm); u32 tmp[DES_EXPKEY_WORDS]; int ret; @@ -803,9 +804,10 @@ static int des_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) return 0; } -static void des_encrypt(void *ctx, u8 *dst, const u8 *src) +static void des_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - const u32 *K = ((struct des_ctx *)ctx)->expkey; + struct des_ctx *ctx = crypto_tfm_ctx(tfm); + const u32 *K = ctx->expkey; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; u32 L, R, A, B; @@ -825,9 +827,10 @@ static void des_encrypt(void *ctx, u8 *dst, const u8 *src) d[1] = cpu_to_le32(L); } -static void des_decrypt(void *ctx, u8 *dst, const u8 *src) +static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - const u32 *K = ((struct des_ctx *)ctx)->expkey + DES_EXPKEY_WORDS - 2; + struct des_ctx *ctx = crypto_tfm_ctx(tfm); + const u32 *K = ctx->expkey + DES_EXPKEY_WORDS - 2; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; u32 L, R, A, B; @@ -860,11 +863,11 @@ static void des_decrypt(void *ctx, u8 *dst, const u8 *src) * property. 
* */ -static int des3_ede_setkey(void *ctx, const u8 *key, +static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen, u32 *flags) { const u32 *K = (const u32 *)key; - struct des3_ede_ctx *dctx = ctx; + struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); u32 *expkey = dctx->expkey; if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || @@ -881,9 +884,9 @@ static int des3_ede_setkey(void *ctx, const u8 *key, return 0; } -static void des3_ede_encrypt(void *ctx, u8 *dst, const u8 *src) +static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct des3_ede_ctx *dctx = ctx; + struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); const u32 *K = dctx->expkey; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; @@ -912,9 +915,9 @@ static void des3_ede_encrypt(void *ctx, u8 *dst, const u8 *src) d[1] = cpu_to_le32(L); } -static void des3_ede_decrypt(void *ctx, u8 *dst, const u8 *src) +static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct des3_ede_ctx *dctx = ctx; + struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm); const u32 *K = dctx->expkey + DES3_EDE_EXPKEY_WORDS - 2; const __le32 *s = (const __le32 *)src; __le32 *d = (__le32 *)dst; diff --git a/crypto/digest.c b/crypto/digest.c index 062d0a5a2c89..2d9d509c2c51 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -20,7 +20,7 @@ static void init(struct crypto_tfm *tfm) { - tfm->__crt_alg->cra_digest.dia_init(crypto_tfm_ctx(tfm)); + tfm->__crt_alg->cra_digest.dia_init(tfm); } static void update(struct crypto_tfm *tfm, @@ -46,16 +46,14 @@ static void update(struct crypto_tfm *tfm, unsigned int bytes = alignmask + 1 - (offset & alignmask); bytes = min(bytes, bytes_from_page); - tfm->__crt_alg->cra_digest.dia_update - (crypto_tfm_ctx(tfm), p, - bytes); + tfm->__crt_alg->cra_digest.dia_update(tfm, p, + bytes); p += bytes; bytes_from_page -= bytes; l -= bytes; } - tfm->__crt_alg->cra_digest.dia_update - (crypto_tfm_ctx(tfm), p, - bytes_from_page); + tfm->__crt_alg->cra_digest.dia_update(tfm, p, + bytes_from_page); crypto_kunmap(src, 0); crypto_yield(tfm); offset = 0; @@ -83,8 +81,7 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) u32 flags; if (tfm->__crt_alg->cra_digest.dia_setkey == NULL) return -ENOSYS; - return tfm->__crt_alg->cra_digest.dia_setkey(crypto_tfm_ctx(tfm), - key, keylen, &flags); + return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen, &flags); } static void digest(struct crypto_tfm *tfm, diff --git a/crypto/khazad.c b/crypto/khazad.c index 5b8dc9a2d374..d4c9d3657b36 100644 --- a/crypto/khazad.c +++ b/crypto/khazad.c @@ -754,10 +754,10 @@ static const u64 c[KHAZAD_ROUNDS + 1] = { 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL }; -static int khazad_setkey(void *ctx_arg, const u8 *in_key, - unsigned int key_len, u32 *flags) +static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) { - struct khazad_ctx *ctx = ctx_arg; + struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); const __be32 *key = (const __be32 *)in_key; int r; const u64 *S = T7; @@ -841,15 +841,15 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], *dst = cpu_to_be64(state); } -static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) +static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct khazad_ctx *ctx = ctx_arg; + struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); khazad_crypt(ctx->E, dst, src); } -static void khazad_decrypt(void *ctx_arg, 
u8 *dst, const u8 *src) +static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { - struct khazad_ctx *ctx = ctx_arg; + struct khazad_ctx *ctx = crypto_tfm_ctx(tfm); khazad_crypt(ctx->D, dst, src); } diff --git a/crypto/md4.c b/crypto/md4.c index a2d6df5c0f8c..c1bc71bdc16b 100644 --- a/crypto/md4.c +++ b/crypto/md4.c @@ -152,9 +152,9 @@ static inline void md4_transform_helper(struct md4_ctx *ctx) md4_transform(ctx->hash, ctx->block); } -static void md4_init(void *ctx) +static void md4_init(struct crypto_tfm *tfm) { - struct md4_ctx *mctx = ctx; + struct md4_ctx *mctx = crypto_tfm_ctx(tfm); mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; @@ -163,9 +163,9 @@ static void md4_init(void *ctx) mctx->byte_count = 0; } -static void md4_update(void *ctx, const u8 *data, unsigned int len) +static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) { - struct md4_ctx *mctx = ctx; + struct md4_ctx *mctx = crypto_tfm_ctx(tfm); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; @@ -193,9 +193,9 @@ static void md4_update(void *ctx, const u8 *data, unsigned int len) memcpy(mctx->block, data, len); } -static void md4_final(void *ctx, u8 *out) +static void md4_final(struct crypto_tfm *tfm, u8 *out) { - struct md4_ctx *mctx = ctx; + struct md4_ctx *mctx = crypto_tfm_ctx(tfm); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); diff --git a/crypto/md5.c b/crypto/md5.c index 7f041aef5da2..93d18e8b3d53 100644 --- a/crypto/md5.c +++ b/crypto/md5.c @@ -147,9 +147,9 @@ static inline void md5_transform_helper(struct md5_ctx *ctx) md5_transform(ctx->hash, ctx->block); } -static void md5_init(void *ctx) +static void md5_init(struct crypto_tfm *tfm) { - struct md5_ctx *mctx = ctx; + struct md5_ctx *mctx = crypto_tfm_ctx(tfm); mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; @@ -158,9 +158,9 @@ static void md5_init(void *ctx) mctx->byte_count = 0; } -static void md5_update(void *ctx, const u8 *data, unsigned int len) +static void md5_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) { - struct md5_ctx *mctx = ctx; + struct md5_ctx *mctx = crypto_tfm_ctx(tfm); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; @@ -188,9 +188,9 @@ static void md5_update(void *ctx, const u8 *data, unsigned int len) memcpy(mctx->block, data, len); } -static void md5_final(void *ctx, u8 *out) +static void md5_final(struct crypto_tfm *tfm, u8 *out) { - struct md5_ctx *mctx = ctx; + struct md5_ctx *mctx = crypto_tfm_ctx(tfm); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c index 701f859ed767..d061da21cfda 100644 --- a/crypto/michael_mic.c +++ b/crypto/michael_mic.c @@ -45,16 +45,17 @@ do { \ } while (0) -static void michael_init(void *ctx) +static void michael_init(struct crypto_tfm *tfm) { - struct michael_mic_ctx *mctx = ctx; + struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); mctx->pending_len = 0; } -static void michael_update(void *ctx, const u8 *data, unsigned int len) +static void michael_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) { - struct michael_mic_ctx *mctx = ctx; + struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); const __le32 *src; if (mctx->pending_len) { @@ -90,9 +91,9 @@ static void michael_update(void *ctx, const u8 *data, unsigned int len) 
} -static void michael_final(void *ctx, u8 *out) +static void michael_final(struct crypto_tfm *tfm, u8 *out) { - struct michael_mic_ctx *mctx = ctx; + struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); u8 *data = mctx->pending; __le32 *dst = (__le32 *)out; @@ -121,10 +122,10 @@ static void michael_final(void *ctx, u8 *out) } -static int michael_setkey(void *ctx, const u8 *key, unsigned int keylen, - u32 *flags) +static int michael_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { - struct michael_mic_ctx *mctx = ctx; + struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm); const __le32 *data = (const __le32 *)key; if (keylen != 8) { diff --git a/crypto/serpent.c b/crypto/serpent.c index e366406ab49d..de60cdddbf4a 100644 --- a/crypto/serpent.c +++ b/crypto/serpent.c @@ -215,9 +215,11 @@ struct serpent_ctx { }; -static int serpent_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) +static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { - u32 *k = ((struct serpent_ctx *)ctx)->expkey; + struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); + u32 *k = ctx->expkey; u8 *k8 = (u8 *)k; u32 r0,r1,r2,r3,r4; int i; @@ -365,10 +367,11 @@ static int serpent_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *fl return 0; } -static void serpent_encrypt(void *ctx, u8 *dst, const u8 *src) +static void serpent_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { + struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); const u32 - *k = ((struct serpent_ctx *)ctx)->expkey, + *k = ctx->expkey, *s = (const u32 *)src; u32 *d = (u32 *)dst, r0, r1, r2, r3, r4; @@ -423,8 +426,9 @@ static void serpent_encrypt(void *ctx, u8 *dst, const u8 *src) d[3] = cpu_to_le32(r3); } -static void serpent_decrypt(void *ctx, u8 *dst, const u8 *src) +static void serpent_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { + struct serpent_ctx *ctx = crypto_tfm_ctx(tfm); const u32 *k = ((struct serpent_ctx *)ctx)->expkey, *s = (const u32 *)src; @@ -492,7 +496,8 @@ static struct crypto_alg serpent_alg = { .cia_decrypt = serpent_decrypt } } }; -static int tnepres_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *flags) +static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen, u32 *flags) { u8 rev_key[SERPENT_MAX_KEY_SIZE]; int i; @@ -506,10 +511,10 @@ static int tnepres_setkey(void *ctx, const u8 *key, unsigned int keylen, u32 *fl for (i = 0; i < keylen; ++i) rev_key[keylen - i - 1] = key[i]; - return serpent_setkey(ctx, rev_key, keylen, flags); + return serpent_setkey(tfm, rev_key, keylen, flags); } -static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src) +static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { const u32 * const s = (const u32 * const)src; u32 * const d = (u32 * const)dst; @@ -521,7 +526,7 @@ static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src) rs[2] = swab32(s[1]); rs[3] = swab32(s[0]); - serpent_encrypt(ctx, (u8 *)rd, (u8 *)rs); + serpent_encrypt(tfm, (u8 *)rd, (u8 *)rs); d[0] = swab32(rd[3]); d[1] = swab32(rd[2]); @@ -529,7 +534,7 @@ static void tnepres_encrypt(void *ctx, u8 *dst, const u8 *src) d[3] = swab32(rd[0]); } -static void tnepres_decrypt(void *ctx, u8 *dst, const u8 *src) +static void tnepres_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { const u32 * const s = (const u32 * const)src; u32 * const d = (u32 * const)dst; @@ -541,7 +546,7 @@ static void tnepres_decrypt(void *ctx, u8 *dst, const u8 *src) rs[2] = swab32(s[1]); 
rs[3] = swab32(s[0]); - serpent_decrypt(ctx, (u8 *)rd, (u8 *)rs); + serpent_decrypt(tfm, (u8 *)rd, (u8 *)rs); d[0] = swab32(rd[3]); d[1] = swab32(rd[2]); diff --git a/crypto/sha1.c b/crypto/sha1.c index b96f57d95a82..6c77b689f87e 100644 --- a/crypto/sha1.c +++ b/crypto/sha1.c @@ -34,9 +34,9 @@ struct sha1_ctx { u8 buffer[64]; }; -static void sha1_init(void *ctx) +static void sha1_init(struct crypto_tfm *tfm) { - struct sha1_ctx *sctx = ctx; + struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); static const struct sha1_ctx initstate = { 0, { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 }, @@ -46,9 +46,10 @@ static void sha1_init(void *ctx) *sctx = initstate; } -static void sha1_update(void *ctx, const u8 *data, unsigned int len) +static void sha1_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) { - struct sha1_ctx *sctx = ctx; + struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); unsigned int partial, done; const u8 *src; @@ -80,9 +81,9 @@ static void sha1_update(void *ctx, const u8 *data, unsigned int len) /* Add padding and return the message digest. */ -static void sha1_final(void* ctx, u8 *out) +static void sha1_final(struct crypto_tfm *tfm, u8 *out) { - struct sha1_ctx *sctx = ctx; + struct sha1_ctx *sctx = crypto_tfm_ctx(tfm); __be32 *dst = (__be32 *)out; u32 i, index, padlen; __be64 bits; @@ -93,10 +94,10 @@ static void sha1_final(void* ctx, u8 *out) /* Pad out to 56 mod 64 */ index = sctx->count & 0x3f; padlen = (index < 56) ? (56 - index) : ((64+56) - index); - sha1_update(sctx, padding, padlen); + sha1_update(tfm, padding, padlen); /* Append length */ - sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); + sha1_update(tfm, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 5; i++) diff --git a/crypto/sha256.c b/crypto/sha256.c index 4533a0564895..bc71d85a7d02 100644 --- a/crypto/sha256.c +++ b/crypto/sha256.c @@ -230,9 +230,9 @@ static void sha256_transform(u32 *state, const u8 *input) memset(W, 0, 64 * sizeof(u32)); } -static void sha256_init(void *ctx) +static void sha256_init(struct crypto_tfm *tfm) { - struct sha256_ctx *sctx = ctx; + struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); sctx->state[0] = H0; sctx->state[1] = H1; sctx->state[2] = H2; @@ -244,9 +244,10 @@ static void sha256_init(void *ctx) sctx->count[0] = sctx->count[1] = 0; } -static void sha256_update(void *ctx, const u8 *data, unsigned int len) +static void sha256_update(struct crypto_tfm *tfm, const u8 *data, + unsigned int len) { - struct sha256_ctx *sctx = ctx; + struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); unsigned int i, index, part_len; /* Compute number of bytes mod 128 */ @@ -276,9 +277,9 @@ static void sha256_update(void *ctx, const u8 *data, unsigned int len) memcpy(&sctx->buf[index], &data[i], len-i); } -static void sha256_final(void* ctx, u8 *out) +static void sha256_final(struct crypto_tfm *tfm, u8 *out) { - struct sha256_ctx *sctx = ctx; + struct sha256_ctx *sctx = crypto_tfm_ctx(tfm); __be32 *dst = (__be32 *)out; __be32 bits[2]; unsigned int index, pad_len; @@ -292,10 +293,10 @@ static void sha256_final(void* ctx, u8 *out) /* Pad out to 56 mod 64. */ index = (sctx->count[0] >> 3) & 0x3f; pad_len = (index < 56) ? 
(56 - index) : ((64+56) - index); - sha256_update(sctx, padding, pad_len); + sha256_update(tfm, padding, pad_len); /* Append length (before padding) */ - sha256_update(sctx, (const u8 *)bits, sizeof(bits)); + sha256_update(tfm, (const u8 *)bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 8; i++) diff --git a/crypto/sha512.c b/crypto/sha512.c index bc77a66d9de2..2dfe7f170b48 100644 --- a/crypto/sha512.c +++ b/crypto/sha512.c @@ -161,9 +161,9 @@ sha512_transform(u64 *state, u64 *W, const u8 *input) } static void -sha512_init(void *ctx) +sha512_init(struct crypto_tfm *tfm) { - struct sha512_ctx *sctx = ctx; + struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); sctx->state[0] = H0; sctx->state[1] = H1; sctx->state[2] = H2; @@ -176,9 +176,9 @@ sha512_init(void *ctx) } static void -sha384_init(void *ctx) +sha384_init(struct crypto_tfm *tfm) { - struct sha512_ctx *sctx = ctx; + struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); sctx->state[0] = HP0; sctx->state[1] = HP1; sctx->state[2] = HP2; @@ -191,9 +191,9 @@ sha384_init(void *ctx) } static void -sha512_update(void *ctx, const u8 *data, unsigned int len) +sha512_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) { - struct sha512_ctx *sctx = ctx; + struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); unsigned int i, index, part_len; @@ -231,9 +231,9 @@ sha512_update(void *ctx, const u8 *data, unsigned int len) } static void -sha512_final(void *ctx, u8 *hash) +sha512_final(struct crypto_tfm *tfm, u8 *hash) { - struct sha512_ctx *sctx = ctx; + struct sha512_ctx *sctx = crypto_tfm_ctx(tfm); static u8 padding[128] = { 0x80, }; __be64 *dst = (__be64 *)hash; __be32 bits[4]; @@ -249,10 +249,10 @@ sha512_final(void *ctx, u8 *hash) /* Pad out to 112 mod 128. */ index = (sctx->count[0] >> 3) & 0x7f; pad_len = (index < 112) ? 
(112 - index) : ((128+112) - index); - sha512_update(sctx, padding, pad_len); + sha512_update(tfm, padding, pad_len); /* Append length (before padding) */ - sha512_update(sctx, (const u8 *)bits, sizeof(bits)); + sha512_update(tfm, (const u8 *)bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 8; i++) @@ -262,12 +262,11 @@ sha512_final(void *ctx, u8 *hash) memset(sctx, 0, sizeof(struct sha512_ctx)); } -static void sha384_final(void *ctx, u8 *hash) +static void sha384_final(struct crypto_tfm *tfm, u8 *hash) { - struct sha512_ctx *sctx = ctx; u8 D[64]; - sha512_final(sctx, D); + sha512_final(tfm, D); memcpy(hash, D, 48); memset(D, 0, 64); diff --git a/crypto/tea.c b/crypto/tea.c index a6a02b30e470..5367adc82fc9 100644 --- a/crypto/tea.c +++ b/crypto/tea.c @@ -45,10 +45,10 @@ struct xtea_ctx { u32 KEY[4]; }; -static int tea_setkey(void *ctx_arg, const u8 *in_key, - unsigned int key_len, u32 *flags) -{ - struct tea_ctx *ctx = ctx_arg; +static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) +{ + struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; if (key_len != 16) @@ -66,12 +66,11 @@ static int tea_setkey(void *ctx_arg, const u8 *in_key, } -static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) -{ +static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ u32 y, z, n, sum = 0; u32 k0, k1, k2, k3; - - struct tea_ctx *ctx = ctx_arg; + struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; @@ -95,11 +94,11 @@ static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) out[1] = cpu_to_le32(z); } -static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) -{ +static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ u32 y, z, n, sum; u32 k0, k1, k2, k3; - struct tea_ctx *ctx = ctx_arg; + struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; @@ -125,10 +124,10 @@ static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) out[1] = cpu_to_le32(z); } -static int xtea_setkey(void *ctx_arg, const u8 *in_key, - unsigned int key_len, u32 *flags) -{ - struct xtea_ctx *ctx = ctx_arg; +static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) +{ + struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; if (key_len != 16) @@ -146,12 +145,11 @@ static int xtea_setkey(void *ctx_arg, const u8 *in_key, } -static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) -{ +static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ u32 y, z, sum = 0; u32 limit = XTEA_DELTA * XTEA_ROUNDS; - - struct xtea_ctx *ctx = ctx_arg; + struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; @@ -168,10 +166,10 @@ static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) out[1] = cpu_to_le32(z); } -static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) -{ +static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ u32 y, z, sum; - struct tea_ctx *ctx = ctx_arg; + struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; @@ -191,12 +189,11 @@ static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) } -static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) -{ +static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 
*src) +{ u32 y, z, sum = 0; u32 limit = XTEA_DELTA * XTEA_ROUNDS; - - struct xtea_ctx *ctx = ctx_arg; + struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; @@ -213,10 +210,10 @@ static void xeta_encrypt(void *ctx_arg, u8 *dst, const u8 *src) out[1] = cpu_to_le32(z); } -static void xeta_decrypt(void *ctx_arg, u8 *dst, const u8 *src) -{ +static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ u32 y, z, sum; - struct tea_ctx *ctx = ctx_arg; + struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 004bb841cc5b..a0fadf3dd3e2 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -496,9 +496,9 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data) tctx->c = c; } -static void tgr192_init(void *ctx) +static void tgr192_init(struct crypto_tfm *tfm) { - struct tgr192_ctx *tctx = ctx; + struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); tctx->a = 0x0123456789abcdefULL; tctx->b = 0xfedcba9876543210ULL; @@ -510,9 +510,10 @@ static void tgr192_init(void *ctx) /* Update the message digest with the contents * of INBUF with length INLEN. */ -static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len) +static void tgr192_update(struct crypto_tfm *tfm, const u8 *inbuf, + unsigned int len) { - struct tgr192_ctx *tctx = ctx; + struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); if (tctx->count == 64) { /* flush the buffer */ tgr192_transform(tctx, tctx->hash); @@ -526,7 +527,7 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len) for (; len && tctx->count < 64; len--) { tctx->hash[tctx->count++] = *inbuf++; } - tgr192_update(tctx, NULL, 0); + tgr192_update(tfm, NULL, 0); if (!len) { return; } @@ -548,15 +549,15 @@ static void tgr192_update(void *ctx, const u8 * inbuf, unsigned int len) /* The routine terminates the computation */ -static void tgr192_final(void *ctx, u8 * out) +static void tgr192_final(struct crypto_tfm *tfm, u8 * out) { - struct tgr192_ctx *tctx = ctx; + struct tgr192_ctx *tctx = crypto_tfm_ctx(tfm); __be64 *dst = (__be64 *)out; __be64 *be64p; __le32 *le32p; u32 t, msb, lsb; - tgr192_update(tctx, NULL, 0); /* flush */ ; + tgr192_update(tfm, NULL, 0); /* flush */ ; msb = 0; t = tctx->nblocks; @@ -584,7 +585,7 @@ static void tgr192_final(void *ctx, u8 * out) while (tctx->count < 64) { tctx->hash[tctx->count++] = 0; } - tgr192_update(tctx, NULL, 0); /* flush */ ; + tgr192_update(tfm, NULL, 0); /* flush */ ; memset(tctx->hash, 0, 56); /* fill next block with zeroes */ } /* append the 64 bit count */ @@ -600,22 +601,20 @@ static void tgr192_final(void *ctx, u8 * out) dst[2] = be64p[2] = cpu_to_be64(tctx->c); } -static void tgr160_final(void *ctx, u8 * out) +static void tgr160_final(struct crypto_tfm *tfm, u8 * out) { - struct tgr192_ctx *wctx = ctx; u8 D[64]; - tgr192_final(wctx, D); + tgr192_final(tfm, D); memcpy(out, D, TGR160_DIGEST_SIZE); memset(D, 0, TGR192_DIGEST_SIZE); } -static void tgr128_final(void *ctx, u8 * out) +static void tgr128_final(struct crypto_tfm *tfm, u8 * out) { - struct tgr192_ctx *wctx = ctx; u8 D[64]; - tgr192_final(wctx, D); + tgr192_final(tfm, D); memcpy(out, D, TGR128_DIGEST_SIZE); memset(D, 0, TGR192_DIGEST_SIZE); } diff --git a/crypto/twofish.c b/crypto/twofish.c index ddfd5a3fcc5f..ec2488242e2d 100644 --- a/crypto/twofish.c +++ b/crypto/twofish.c @@ -643,11 +643,11 @@ struct twofish_ctx { }; /* Perform the key setup. 
*/ -static int twofish_setkey(void *cx, const u8 *key, - unsigned int key_len, u32 *flags) +static int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int key_len, u32 *flags) { - struct twofish_ctx *ctx = cx; + struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); int i, j, k; @@ -802,9 +802,9 @@ static int twofish_setkey(void *cx, const u8 *key, } /* Encrypt one block. in and out may be the same. */ -static void twofish_encrypt(void *cx, u8 *out, const u8 *in) +static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct twofish_ctx *ctx = cx; + struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *src = (const __le32 *)in; __le32 *dst = (__le32 *)out; @@ -839,9 +839,9 @@ static void twofish_encrypt(void *cx, u8 *out, const u8 *in) } /* Decrypt one block. in and out may be the same. */ -static void twofish_decrypt(void *cx, u8 *out, const u8 *in) +static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct twofish_ctx *ctx = cx; + struct twofish_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *src = (const __le32 *)in; __le32 *dst = (__le32 *)out; diff --git a/crypto/wp512.c b/crypto/wp512.c index b226a126cfae..727d05a19ff4 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c @@ -981,9 +981,9 @@ static void wp512_process_buffer(struct wp512_ctx *wctx) { } -static void wp512_init (void *ctx) { +static void wp512_init(struct crypto_tfm *tfm) { + struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); int i; - struct wp512_ctx *wctx = ctx; memset(wctx->bitLength, 0, 32); wctx->bufferBits = wctx->bufferPos = 0; @@ -993,10 +993,10 @@ static void wp512_init (void *ctx) { } } -static void wp512_update(void *ctx, const u8 *source, unsigned int len) +static void wp512_update(struct crypto_tfm *tfm, const u8 *source, + unsigned int len) { - - struct wp512_ctx *wctx = ctx; + struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); int sourcePos = 0; unsigned int bits_len = len * 8; // convert to number of bits int sourceGap = (8 - ((int)bits_len & 7)) & 7; @@ -1054,9 +1054,9 @@ static void wp512_update(void *ctx, const u8 *source, unsigned int len) } -static void wp512_final(void *ctx, u8 *out) +static void wp512_final(struct crypto_tfm *tfm, u8 *out) { - struct wp512_ctx *wctx = ctx; + struct wp512_ctx *wctx = crypto_tfm_ctx(tfm); int i; u8 *buffer = wctx->buffer; u8 *bitLength = wctx->bitLength; @@ -1087,22 +1087,20 @@ static void wp512_final(void *ctx, u8 *out) wctx->bufferPos = bufferPos; } -static void wp384_final(void *ctx, u8 *out) +static void wp384_final(struct crypto_tfm *tfm, u8 *out) { - struct wp512_ctx *wctx = ctx; u8 D[64]; - wp512_final (wctx, D); + wp512_final(tfm, D); memcpy (out, D, WP384_DIGEST_SIZE); memset (D, 0, WP512_DIGEST_SIZE); } -static void wp256_final(void *ctx, u8 *out) +static void wp256_final(struct crypto_tfm *tfm, u8 *out) { - struct wp512_ctx *wctx = ctx; u8 D[64]; - wp512_final (wctx, D); + wp512_final(tfm, D); memcpy (out, D, WP256_DIGEST_SIZE); memset (D, 0, WP512_DIGEST_SIZE); } diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 5158a9db4bc5..b98ad203d6cb 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -282,19 +282,20 @@ aes_hw_extkey_available(uint8_t key_len) return 0; } -static inline struct aes_ctx *aes_ctx(void *ctx) +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) { + unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm); unsigned long align = PADLOCK_ALIGNMENT; if (align <= crypto_tfm_ctx_alignment()) align = 1; - return (struct aes_ctx 
*)ALIGN((unsigned long)ctx, align); + return (struct aes_ctx *)ALIGN(addr, align); } -static int -aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t *flags) +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, + unsigned int key_len, u32 *flags) { - struct aes_ctx *ctx = aes_ctx(ctx_arg); + struct aes_ctx *ctx = aes_ctx(tfm); const __le32 *key = (const __le32 *)in_key; uint32_t i, t, u, v, w; uint32_t P[AES_EXTENDED_KEY_SIZE]; @@ -414,24 +415,22 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, return iv; } -static void -aes_encrypt(void *ctx_arg, uint8_t *out, const uint8_t *in) +static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct aes_ctx *ctx = aes_ctx(ctx_arg); + struct aes_ctx *ctx = aes_ctx(tfm); padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, 1); } -static void -aes_decrypt(void *ctx_arg, uint8_t *out, const uint8_t *in) +static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) { - struct aes_ctx *ctx = aes_ctx(ctx_arg); + struct aes_ctx *ctx = aes_ctx(tfm); padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1); } static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, const u8 *in, unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); + struct aes_ctx *ctx = aes_ctx(desc->tfm); padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE); return nbytes & ~(AES_BLOCK_SIZE - 1); @@ -440,7 +439,7 @@ static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out, static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, const u8 *in, unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); + struct aes_ctx *ctx = aes_ctx(desc->tfm); padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, nbytes / AES_BLOCK_SIZE); return nbytes & ~(AES_BLOCK_SIZE - 1); @@ -449,7 +448,7 @@ static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out, static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, const u8 *in, unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); + struct aes_ctx *ctx = aes_ctx(desc->tfm); u8 *iv; iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info, @@ -462,7 +461,7 @@ static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out, static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out, const u8 *in, unsigned int nbytes) { - struct aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(desc->tfm)); + struct aes_ctx *ctx = aes_ctx(desc->tfm); padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt, nbytes / AES_BLOCK_SIZE); return nbytes & ~(AES_BLOCK_SIZE - 1); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 5a0470e36111..ef918803ec30 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -66,7 +66,7 @@ struct crypto_tfm; struct cipher_desc { struct crypto_tfm *tfm; - void (*crfn)(void *ctx, u8 *dst, const u8 *src); + void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, const u8 *src, unsigned int nbytes); void *info; @@ -79,10 +79,10 @@ struct cipher_desc { struct cipher_alg { unsigned int cia_min_keysize; unsigned int cia_max_keysize; - int (*cia_setkey)(void *ctx, const u8 *key, + int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen, u32 *flags); - void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src); 
- void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src); + void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); + void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, u8 *dst, const u8 *src, @@ -100,20 +100,21 @@ struct cipher_alg { struct digest_alg { unsigned int dia_digestsize; - void (*dia_init)(void *ctx); - void (*dia_update)(void *ctx, const u8 *data, unsigned int len); - void (*dia_final)(void *ctx, u8 *out); - int (*dia_setkey)(void *ctx, const u8 *key, + void (*dia_init)(struct crypto_tfm *tfm); + void (*dia_update)(struct crypto_tfm *tfm, const u8 *data, + unsigned int len); + void (*dia_final)(struct crypto_tfm *tfm, u8 *out); + int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen, u32 *flags); }; struct compress_alg { - int (*coa_init)(void *ctx); - void (*coa_exit)(void *ctx); - int (*coa_compress)(void *ctx, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); - int (*coa_decompress)(void *ctx, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); + int (*coa_init)(struct crypto_tfm *tfm); + void (*coa_exit)(struct crypto_tfm *tfm); + int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); + int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen); }; #define cra_cipher cra_u.cipher From 82062c72cd643c99a9e1c231270acbab986fd23f Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 16 May 2006 22:20:34 +1000 Subject: [PATCH 06/14] [CRYPTO] padlock: Rearrange context structure to reduce code size i386 assembly has more compact instructions for accessing 7-bit offsets. So by moving the large members to the end of the structure we can save quite a bit of code size. This patch shaves about 10% or 300 bytes off the padlock-aes file. Signed-off-by: Herbert Xu --- drivers/crypto/padlock-aes.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index b98ad203d6cb..17ee684144f9 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -60,15 +60,14 @@ #define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t)) struct aes_ctx { - uint32_t e_data[AES_EXTENDED_KEY_SIZE]; - uint32_t d_data[AES_EXTENDED_KEY_SIZE]; struct { struct cword encrypt; struct cword decrypt; } cword; - uint32_t *E; - uint32_t *D; + u32 *D; int key_length; + u32 E[AES_EXTENDED_KEY_SIZE]; + u32 d_data[AES_EXTENDED_KEY_SIZE]; }; /* ====== Key management routines ====== */ @@ -313,8 +312,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, * itself we must supply the plain key for both encryption * and decryption. */ - ctx->E = ctx->e_data; - ctx->D = ctx->e_data; + ctx->D = ctx->E; E_KEY[0] = le32_to_cpu(key[0]); E_KEY[1] = le32_to_cpu(key[1]); From 110bf1c0e932615cbe43a8af8a07bc3750ae4295 Mon Sep 17 00:00:00 2001 From: Michal Ludvig Date: Mon, 22 May 2006 08:28:06 +1000 Subject: [PATCH 07/14] [CRYPTO] api: Fixed incorrect passing of context instead of tfm Fix a few omissions in passing TFM instead of CTX to algorithms. 
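The convention all of these prototype changes establish is that the algorithm callbacks receive the struct crypto_tfm pointer and recover their private state through crypto_tfm_ctx(), instead of being handed the context directly. As a rough illustration only, the user-space sketch below mimics that layout with made-up stand-in types (the real struct crypto_tfm and crypto_tfm_ctx() are more involved): the per-transform context simply lives in memory allocated together with the tfm, so any callback that has the tfm can find it.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct crypto_tfm; the context area follows the object. */
struct tfm_sketch {
	unsigned int ctxsize;
};

static void *tfm_ctx(struct tfm_sketch *tfm)
{
	return tfm + 1;		/* context sits right after the tfm */
}

/* A made-up digest context and a new-style init callback. */
struct digest_ctx_sketch {
	unsigned int byte_count;
};

static void digest_init_sketch(struct tfm_sketch *tfm)
{
	struct digest_ctx_sketch *dctx = tfm_ctx(tfm);

	dctx->byte_count = 0;
}

int main(void)
{
	struct tfm_sketch *tfm = malloc(sizeof(*tfm) + sizeof(struct digest_ctx_sketch));

	if (!tfm)
		return 1;
	tfm->ctxsize = sizeof(struct digest_ctx_sketch);
	digest_init_sketch(tfm);
	printf("byte_count = %u\n", ((struct digest_ctx_sketch *)tfm_ctx(tfm))->byte_count);
	free(tfm);
	return 0;
}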
Signed-off-by: Michal Ludvig Signed-off-by: Herbert Xu --- crypto/compress.c | 4 ++-- crypto/digest.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crypto/compress.c b/crypto/compress.c index c12fc0c41dac..f3e07334afd0 100644 --- a/crypto/compress.c +++ b/crypto/compress.c @@ -44,7 +44,7 @@ int crypto_init_compress_ops(struct crypto_tfm *tfm) int ret = 0; struct compress_tfm *ops = &tfm->crt_compress; - ret = tfm->__crt_alg->cra_compress.coa_init(crypto_tfm_ctx(tfm)); + ret = tfm->__crt_alg->cra_compress.coa_init(tfm); if (ret) goto out; @@ -57,5 +57,5 @@ out: void crypto_exit_compress_ops(struct crypto_tfm *tfm) { - tfm->__crt_alg->cra_compress.coa_exit(crypto_tfm_ctx(tfm)); + tfm->__crt_alg->cra_compress.coa_exit(tfm); } diff --git a/crypto/digest.c b/crypto/digest.c index 2d9d509c2c51..603006a7bef2 100644 --- a/crypto/digest.c +++ b/crypto/digest.c @@ -70,10 +70,10 @@ static void final(struct crypto_tfm *tfm, u8 *out) unsigned int size = crypto_tfm_alg_digestsize(tfm); u8 buffer[size + alignmask]; u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); - tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), dst); + tfm->__crt_alg->cra_digest.dia_final(tfm, dst); memcpy(out, dst, size); } else - tfm->__crt_alg->cra_digest.dia_final(crypto_tfm_ctx(tfm), out); + tfm->__crt_alg->cra_digest.dia_final(tfm, out); } static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) From c7fc05992afcf1d63d6d5fb6142c8d39094dbca9 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 24 May 2006 13:02:26 +1000 Subject: [PATCH 08/14] [CRYPTO] api: Added cra_init/cra_exit This patch adds the hooks cra_init/cra_exit which are called during a tfm's construction and destruction respectively. This will be used by the instances to allocate child tfm's. For now this lets us get rid of the coa_init/coa_exit functions which are used for exactly that purpose (unlike the dia_init function which is called for each transaction). In fact the coa_exit path is currently buggy as it may get called twice when an error is encountered during initialisation. 
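As a sketch of the intended use (the wrapper names and the wrapped "aes" below are invented for illustration; only crypto_alloc_tfm(), crypto_free_tfm() and crypto_tfm_ctx() are the existing API, and this fragment is not a complete module), an instance that needs a child transform could allocate it from cra_init and release it from cra_exit, which crypto_free_tfm() invokes exactly once:

#include <linux/crypto.h>
#include <linux/errno.h>

struct wrapper_ctx {
	struct crypto_tfm *child;
};

static int wrapper_init(struct crypto_tfm *tfm)
{
	struct wrapper_ctx *ctx = crypto_tfm_ctx(tfm);

	/* Runs while the wrapper tfm is being constructed. */
	ctx->child = crypto_alloc_tfm("aes", 0);
	return ctx->child ? 0 : -ENOMEM;
}

static void wrapper_exit(struct crypto_tfm *tfm)
{
	struct wrapper_ctx *ctx = crypto_tfm_ctx(tfm);

	/* Runs from crypto_free_tfm(), and only if wrapper_init() succeeded. */
	crypto_free_tfm(ctx->child);
}

The corresponding crypto_alg would set .cra_init = wrapper_init and .cra_exit = wrapper_exit, just as the deflate conversion below sets them for its zlib streams.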
Signed-off-by: Herbert Xu --- crypto/api.c | 11 ++++++++--- crypto/compress.c | 9 +-------- crypto/deflate.c | 4 ++-- include/linux/crypto.h | 5 +++-- 4 files changed, 14 insertions(+), 15 deletions(-) diff --git a/crypto/api.c b/crypto/api.c index 80bba637fba7..8145310d7985 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -188,13 +188,16 @@ struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags) if (crypto_init_flags(tfm, flags)) goto out_free_tfm; - if (crypto_init_ops(tfm)) { - crypto_exit_ops(tfm); + if (crypto_init_ops(tfm)) goto out_free_tfm; - } + + if (alg->cra_init && alg->cra_init(tfm)) + goto cra_init_failed; goto out; +cra_init_failed: + crypto_exit_ops(tfm); out_free_tfm: kfree(tfm); tfm = NULL; @@ -215,6 +218,8 @@ void crypto_free_tfm(struct crypto_tfm *tfm) alg = tfm->__crt_alg; size = sizeof(*tfm) + alg->cra_ctxsize; + if (alg->cra_exit) + alg->cra_exit(tfm); crypto_exit_ops(tfm); crypto_alg_put(alg); memset(tfm, 0, size); diff --git a/crypto/compress.c b/crypto/compress.c index f3e07334afd0..eca182aa3380 100644 --- a/crypto/compress.c +++ b/crypto/compress.c @@ -41,21 +41,14 @@ int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags) int crypto_init_compress_ops(struct crypto_tfm *tfm) { - int ret = 0; struct compress_tfm *ops = &tfm->crt_compress; - - ret = tfm->__crt_alg->cra_compress.coa_init(tfm); - if (ret) - goto out; ops->cot_compress = crypto_compress; ops->cot_decompress = crypto_decompress; -out: - return ret; + return 0; } void crypto_exit_compress_ops(struct crypto_tfm *tfm) { - tfm->__crt_alg->cra_compress.coa_exit(tfm); } diff --git a/crypto/deflate.c b/crypto/deflate.c index 5dd2404ae5b2..6588bbf82e9b 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -201,9 +201,9 @@ static struct crypto_alg alg = { .cra_ctxsize = sizeof(struct deflate_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), + .cra_init = deflate_init, + .cra_exit = deflate_exit, .cra_u = { .compress = { - .coa_init = deflate_init, - .coa_exit = deflate_exit, .coa_compress = deflate_compress, .coa_decompress = deflate_decompress } } }; diff --git a/include/linux/crypto.h b/include/linux/crypto.h index ef918803ec30..6c013c88080f 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -109,8 +109,6 @@ struct digest_alg { }; struct compress_alg { - int (*coa_init)(struct crypto_tfm *tfm); - void (*coa_exit)(struct crypto_tfm *tfm); int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen); int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, @@ -138,6 +136,9 @@ struct crypto_alg { struct digest_alg digest; struct compress_alg compress; } cra_u; + + int (*cra_init)(struct crypto_tfm *tfm); + void (*cra_exit)(struct crypto_tfm *tfm); struct module *cra_module; }; From d913ea0d6b6a48dd6eed8fc5e299b8b10e049186 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 21 May 2006 08:45:26 +1000 Subject: [PATCH 09/14] [CRYPTO] api: Removed const from cra_name/cra_driver_name We do need to change these names now and even more so in future with instantiated algorithms. So let's stop lying to the compiler and get rid of the const modifiers. 
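The registration path is the immediate reason the fields have to be writable: crypto_set_driver_name(), touched above, fills in cra_driver_name by appending "-generic" to cra_name when a driver did not choose its own name. A stand-alone approximation of that behaviour (the struct, size constant and function name here are stand-ins, not the kernel code):

#include <stdio.h>

#define NAME_MAX_SKETCH 64	/* stand-in for CRYPTO_MAX_ALG_NAME */

struct alg_names_sketch {
	char name[NAME_MAX_SKETCH];
	char driver_name[NAME_MAX_SKETCH];	/* written at registration time */
};

static int set_default_driver_name(struct alg_names_sketch *alg)
{
	if (alg->driver_name[0])
		return 0;	/* the driver already picked a name */

	/* Derive "<name>-generic", failing if it would not fit. */
	if (snprintf(alg->driver_name, sizeof(alg->driver_name),
		     "%s-generic", alg->name) >= (int)sizeof(alg->driver_name))
		return -1;
	return 0;
}

int main(void)
{
	struct alg_names_sketch alg = { .name = "sha1" };

	set_default_driver_name(&alg);
	printf("%s -> %s\n", alg.name, alg.driver_name);	/* sha1 -> sha1-generic */
	return 0;
}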
Signed-off-by: Herbert Xu --- crypto/api.c | 2 +- include/linux/crypto.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/api.c b/crypto/api.c index 8145310d7985..735fdedd8217 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -229,7 +229,7 @@ void crypto_free_tfm(struct crypto_tfm *tfm) static inline int crypto_set_driver_name(struct crypto_alg *alg) { static const char suffix[] = "-generic"; - char *driver_name = (char *)alg->cra_driver_name; + char *driver_name = alg->cra_driver_name; int len; if (*driver_name) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 6c013c88080f..7f946241b879 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -128,8 +128,8 @@ struct crypto_alg { int cra_priority; - const char cra_name[CRYPTO_MAX_ALG_NAME]; - const char cra_driver_name[CRYPTO_MAX_ALG_NAME]; + char cra_name[CRYPTO_MAX_ALG_NAME]; + char cra_driver_name[CRYPTO_MAX_ALG_NAME]; union { struct cipher_alg cipher; From 996e2523cc347cc98237d2da3454aedc779fdcba Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sun, 21 May 2006 11:57:20 +1000 Subject: [PATCH 10/14] [CRYPTO] api: Allow replacement when registering new algorithms We already allow asynchronous removal of existing algorithm modules. By allowing the replacement of existing algorithms, we can replace algorithms without having to wait for all existing users to complete. Signed-off-by: Herbert Xu --- crypto/api.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/api.c b/crypto/api.c index 735fdedd8217..c11ec1fd4f18 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -267,13 +267,13 @@ int crypto_register_alg(struct crypto_alg *alg) down_write(&crypto_alg_sem); list_for_each_entry(q, &crypto_alg_list, cra_list) { - if (!strcmp(q->cra_driver_name, alg->cra_driver_name)) { + if (q == alg) { ret = -EEXIST; goto out; } } - list_add_tail(&alg->cra_list, &crypto_alg_list); + list_add(&alg->cra_list, &crypto_alg_list); out: up_write(&crypto_alg_sem); return ret; From 14fdf477a7e3ff54f8e67fe506dd2677a36c56e4 Mon Sep 17 00:00:00 2001 From: Michal Ludvig Date: Tue, 30 May 2006 14:49:38 +1000 Subject: [PATCH 11/14] [CRYPTO] tcrypt: Return -EAGAIN from module_init() Intentionally return -EAGAIN from module_init() to ensure the module does not stay loaded in the kernel. The module does all of its work from init() and offers no runtime functionality, so there is no reason to keep it in memory. Signed-off-by: Michal Ludvig Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 49e344f00806..7bf93c5decfe 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1113,7 +1113,14 @@ static int __init init(void) kfree(xbuf); kfree(tvmem); - return 0; + + /* We intentionaly return -EAGAIN to prevent keeping + * the module. It does all its work from init() + * and doesn't offer any runtime functionality + * => we don't need it in the memory, do we? + * -- mludvig + */ + return -EAGAIN; } /* From e805792851bcb0bb42f0c8a352be64564c13e374 Mon Sep 17 00:00:00 2001 From: Michal Ludvig Date: Tue, 30 May 2006 22:04:19 +1000 Subject: [PATCH 12/14] [CRYPTO] tcrypt: Speed benchmark support for digest algorithms This patch adds speed tests (benchmarks) for digest algorithms. Tests are run with different buffer sizes (16 bytes, ..., 8 kBytes), and for each buffer size multiple tests are run with different update() sizes (e.g. hashing a 64 byte buffer in four 16 byte updates).
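(The new digest cases plug into the same mode/sec module parameters used by the existing cipher speed tests, so — assuming those parameters — something like "modprobe tcrypt mode=303 sec=1" would run the sha1 benchmark for one second per measurement, while sec=0 selects the cycle-counter path; the mode numbers are listed in the do_test() switch added below, and the buffer/update combinations come from the generic_digest_speed_template table added to tcrypt.h.)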
There is no correctness checking of the result and all tests and algorithms use the same input buffer. Signed-off-by: Michal Ludvig Signed-off-by: Herbert Xu --- crypto/tcrypt.c | 170 ++++++++++++++++++++++++++++++++++++++++++++++++ crypto/tcrypt.h | 36 ++++++++++ 2 files changed, 206 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 7bf93c5decfe..e52f56c5bd5e 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -570,6 +570,122 @@ out: crypto_free_tfm(tfm); } +static void test_digest_jiffies(struct crypto_tfm *tfm, char *p, int blen, + int plen, char *out, int sec) +{ + struct scatterlist sg[1]; + unsigned long start, end; + int bcount, pcount; + + for (start = jiffies, end = start + sec * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + crypto_digest_init(tfm); + for (pcount = 0; pcount < blen; pcount += plen) { + sg_set_buf(sg, p + pcount, plen); + crypto_digest_update(tfm, sg, 1); + } + /* we assume there is enough space in 'out' for the result */ + crypto_digest_final(tfm, out); + } + + printk("%6u opers/sec, %9lu bytes/sec\n", + bcount / sec, ((long)bcount * blen) / sec); + + return; +} + +static void test_digest_cycles(struct crypto_tfm *tfm, char *p, int blen, + int plen, char *out) +{ + struct scatterlist sg[1]; + unsigned long cycles = 0; + int i, pcount; + + local_bh_disable(); + local_irq_disable(); + + /* Warm-up run. */ + for (i = 0; i < 4; i++) { + crypto_digest_init(tfm); + for (pcount = 0; pcount < blen; pcount += plen) { + sg_set_buf(sg, p + pcount, plen); + crypto_digest_update(tfm, sg, 1); + } + crypto_digest_final(tfm, out); + } + + /* The real thing. */ + for (i = 0; i < 8; i++) { + cycles_t start, end; + + crypto_digest_init(tfm); + + start = get_cycles(); + + for (pcount = 0; pcount < blen; pcount += plen) { + sg_set_buf(sg, p + pcount, plen); + crypto_digest_update(tfm, sg, 1); + } + crypto_digest_final(tfm, out); + + end = get_cycles(); + + cycles += end - start; + } + + local_irq_enable(); + local_bh_enable(); + + printk("%6lu cycles/operation, %4lu cycles/byte\n", + cycles / 8, cycles / (8 * blen)); + + return; +} + +static void test_digest_speed(char *algo, unsigned int sec, + struct digest_speed *speed) +{ + struct crypto_tfm *tfm; + char output[1024]; + int i; + + printk("\ntesting speed of %s\n", algo); + + tfm = crypto_alloc_tfm(algo, 0); + + if (tfm == NULL) { + printk("failed to load transform for %s\n", algo); + return; + } + + if (crypto_tfm_alg_digestsize(tfm) > sizeof(output)) { + printk("digestsize(%u) > outputbuffer(%zu)\n", + crypto_tfm_alg_digestsize(tfm), sizeof(output)); + goto out; + } + + for (i = 0; speed[i].blen != 0; i++) { + if (speed[i].blen > TVMEMSIZE) { + printk("template (%u) too big for tvmem (%u)\n", + speed[i].blen, TVMEMSIZE); + goto out; + } + + printk("test%3u (%5u byte blocks,%5u bytes per update,%4u updates): ", + i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen); + + memset(tvmem, 0xff, speed[i].blen); + + if (sec) + test_digest_jiffies(tfm, tvmem, speed[i].blen, speed[i].plen, output, sec); + else + test_digest_cycles(tfm, tvmem, speed[i].blen, speed[i].plen, output); + } + +out: + crypto_free_tfm(tfm); +} + static void test_deflate(void) { unsigned int i; @@ -1086,6 +1202,60 @@ static void do_test(void) des_speed_template); break; + case 300: + /* fall through */ + + case 301: + test_digest_speed("md4", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 302: + test_digest_speed("md5", sec, generic_digest_speed_template); + if (mode > 300 && mode < 
400) break; + + case 303: + test_digest_speed("sha1", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 304: + test_digest_speed("sha256", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 305: + test_digest_speed("sha384", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 306: + test_digest_speed("sha512", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 307: + test_digest_speed("wp256", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 308: + test_digest_speed("wp384", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 309: + test_digest_speed("wp512", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 310: + test_digest_speed("tgr128", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 311: + test_digest_speed("tgr160", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 312: + test_digest_speed("tgr192", sec, generic_digest_speed_template); + if (mode > 300 && mode < 400) break; + + case 399: + break; + case 1000: test_available(); break; diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h index 1f683ba794ee..1fac5602f633 100644 --- a/crypto/tcrypt.h +++ b/crypto/tcrypt.h @@ -65,6 +65,11 @@ struct cipher_speed { unsigned int blen; }; +struct digest_speed { + unsigned int blen; /* buffer length */ + unsigned int plen; /* per-update length */ +}; + /* * MD4 test vectors from RFC1320 */ @@ -2975,4 +2980,35 @@ static struct cipher_speed des_speed_template[] = { { .klen = 0, .blen = 0, } }; +/* + * Digest speed tests + */ +static struct digest_speed generic_digest_speed_template[] = { + { .blen = 16, .plen = 16, }, + { .blen = 64, .plen = 16, }, + { .blen = 64, .plen = 64, }, + { .blen = 256, .plen = 16, }, + { .blen = 256, .plen = 64, }, + { .blen = 256, .plen = 256, }, + { .blen = 1024, .plen = 16, }, + { .blen = 1024, .plen = 256, }, + { .blen = 1024, .plen = 1024, }, + { .blen = 2048, .plen = 16, }, + { .blen = 2048, .plen = 256, }, + { .blen = 2048, .plen = 1024, }, + { .blen = 2048, .plen = 2048, }, + { .blen = 4096, .plen = 16, }, + { .blen = 4096, .plen = 256, }, + { .blen = 4096, .plen = 1024, }, + { .blen = 4096, .plen = 4096, }, + { .blen = 8192, .plen = 16, }, + { .blen = 8192, .plen = 256, }, + { .blen = 8192, .plen = 1024, }, + { .blen = 8192, .plen = 4096, }, + { .blen = 8192, .plen = 8192, }, + + /* End marker */ + { .blen = 0, .plen = 0, } +}; + #endif /* _CRYPTO_TCRYPT_H */ From e90b1a2be6010acf01673b0625cfbf18240f7744 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 2 Jun 2006 08:42:25 +1000 Subject: [PATCH 13/14] [CRYPTO] aes: Add wrappers for assembly routines The wrapper routines are required when asmlinkage differs from the usual calling convention. So we need to have them. However, by rearranging the parameters, they will get optimised away to a single jump for most people. 
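The trick being relied on: if the wrapper takes exactly the same parameters in the same order as the routine it forwards to, an optimising compiler can lower the forwarding call to a tail call, i.e. a single jump. A stand-alone illustration with made-up names (asmlinkage is reduced to a no-op here; in the kernel it pins down the calling convention, which is why the wrappers are needed in the first place):

#include <stdio.h>

#define asmlinkage	/* no-op stand-in; in the kernel this selects the calling convention */

struct dummy_tfm { int unused; };

asmlinkage void blk_enc(struct dummy_tfm *tfm, unsigned char *out,
			const unsigned char *in)
{
	(void)tfm;
	out[0] = in[0] ^ 0xff;	/* pretend block operation */
}

/* Identical parameter list and order: when emitted out of line (its
 * address is taken below, as it would be via cia_encrypt), an optimising
 * compiler can reduce this body to a single jump to blk_enc. */
static void enc_wrapper(struct dummy_tfm *tfm, unsigned char *out,
			const unsigned char *in)
{
	blk_enc(tfm, out, in);
}

int main(void)
{
	void (*op)(struct dummy_tfm *, unsigned char *, const unsigned char *) = enc_wrapper;
	struct dummy_tfm tfm = { 0 };
	unsigned char in = 0x0f, out = 0;

	op(&tfm, &out, &in);	/* mimics calling through cia_encrypt */
	printf("%#x\n", out);	/* prints 0xf0 */
	return 0;
}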
Signed-off-by: Herbert Xu --- arch/i386/crypto/aes.c | 14 ++++++++++++-- arch/x86_64/crypto/aes-x86_64-asm.S | 8 ++++---- arch/x86_64/crypto/aes.c | 14 ++++++++++++-- 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c index b9c7d99160f1..d3806daa3de3 100644 --- a/arch/i386/crypto/aes.c +++ b/arch/i386/crypto/aes.c @@ -464,6 +464,16 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return 0; } +static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + aes_enc_blk(tfm, dst, src); +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + aes_dec_blk(tfm, dst, src); +} + static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-i586", @@ -478,8 +488,8 @@ static struct crypto_alg aes_alg = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, - .cia_encrypt = aes_enc_blk, - .cia_decrypt = aes_dec_blk + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt } } }; diff --git a/arch/x86_64/crypto/aes-x86_64-asm.S b/arch/x86_64/crypto/aes-x86_64-asm.S index f3ba643e144d..26b40de4d0b0 100644 --- a/arch/x86_64/crypto/aes-x86_64-asm.S +++ b/arch/x86_64/crypto/aes-x86_64-asm.S @@ -151,9 +151,9 @@ FUNC: movq r1,r2; \ #define decrypt_final(TAB,OFFSET) \ round(TAB,OFFSET,R2,R1,R4,R3,R6,R5,R7,R10,R5,R6,R3,R4) -/* void aes_encrypt(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */ +/* void aes_enc_blk(stuct crypto_tfm *tfm, u8 *out, const u8 *in) */ - entry(aes_encrypt,0,enc128,enc192) + entry(aes_enc_blk,0,enc128,enc192) encrypt_round(aes_ft_tab,-96) encrypt_round(aes_ft_tab,-80) enc192: encrypt_round(aes_ft_tab,-64) @@ -170,9 +170,9 @@ enc128: encrypt_round(aes_ft_tab,-32) encrypt_final(aes_fl_tab,112) return -/* void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ +/* void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in) */ - entry(aes_decrypt,240,dec128,dec192) + entry(aes_dec_blk,240,dec128,dec192) decrypt_round(aes_it_tab,-96) decrypt_round(aes_it_tab,-80) dec192: decrypt_round(aes_it_tab,-64) diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c index d6f8e0463b5d..68866fab37aa 100644 --- a/arch/x86_64/crypto/aes.c +++ b/arch/x86_64/crypto/aes.c @@ -283,8 +283,18 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, return 0; } -extern void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); -extern void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in); +asmlinkage void aes_enc_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); +asmlinkage void aes_dec_blk(struct crypto_tfm *tfm, u8 *out, const u8 *in); + +static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + aes_enc_blk(tfm, dst, src); +} + +static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) +{ + aes_dec_blk(tfm, dst, src); +} static struct crypto_alg aes_alg = { .cra_name = "aes", From b9d0a25a484a90c1d60b974d115eff2fe580ce16 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Sat, 10 Jun 2006 18:06:34 +1000 Subject: [PATCH 14/14] [CRYPTO] tcrypt: Forbid tcrypt from being built-in It makes no sense to build tcrypt into the kernel. In fact, now that the driver init function's return status is being checked, it is in fact harmful to do so. 
Signed-off-by: Herbert Xu --- crypto/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index c442f2e7ce46..ba133d557045 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -337,7 +337,7 @@ config CRYPTO_CRC32C config CRYPTO_TEST tristate "Testing module" - depends on CRYPTO + depends on CRYPTO && m help Quick & dirty crypto test module.
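(For reference, "depends on CRYPTO && m" is the usual Kconfig idiom for a module-only option: the added "&& m" term means the dependency can never evaluate to y, so CRYPTO_TEST can only be set to m or left disabled, which is exactly the built-in configuration being forbidden above.)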