Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (64 commits)
  [BLOCK] dm-crypt: trivial comment improvements
  [CRYPTO] api: Deprecate crypto_digest_* and crypto_alg_available
  [CRYPTO] padlock: Convert padlock-sha to use crypto_hash
  [CRYPTO] users: Use crypto_comp and crypto_has_*
  [CRYPTO] api: Add crypto_comp and crypto_has_*
  [CRYPTO] users: Use crypto_hash interface instead of crypto_digest
  [SCSI] iscsi: Use crypto_hash interface instead of crypto_digest
  [CRYPTO] digest: Remove old HMAC implementation
  [CRYPTO] doc: Update documentation for hash and me
  [SCTP]: Use HMAC template and hash interface
  [IPSEC]: Use HMAC template and hash interface
  [CRYPTO] tcrypt: Use HMAC template and hash interface
  [CRYPTO] hmac: Add crypto template implementation
  [CRYPTO] digest: Added user API for new hash type
  [CRYPTO] api: Mark parts of cipher interface as deprecated
  [PATCH] scatterlist: Add const to sg_set_buf/sg_init_one pointer argument
  [CRYPTO] drivers: Remove obsolete block cipher operations
  [CRYPTO] users: Use block ciphers where applicable
  [SUNRPC] GSS: Use block ciphers where applicable
  [IPSEC] ESP: Use block ciphers where applicable
  ...
Linus Torvalds 2006-09-22 12:51:33 -07:00
commit 6bbd9b6d69
98 changed files with 7806 additions and 2860 deletions

View File

@@ -19,15 +19,14 @@ At the lowest level are algorithms, which register dynamically with the
API.
'Transforms' are user-instantiated objects, which maintain state, handle all
of the implementation logic (e.g. manipulating page vectors), provide an
abstraction to the underlying algorithms, and handle common logical
operations (e.g. cipher modes, HMAC for digests). However, at the user
of the implementation logic (e.g. manipulating page vectors) and provide an
abstraction to the underlying algorithms. However, at the user
level they are very simple.
Conceptually, the API layering looks like this:
[transform api] (user interface)
[transform ops] (per-type logic glue e.g. cipher.c, digest.c)
[transform ops] (per-type logic glue e.g. cipher.c, compress.c)
[algorithm api] (for registering algorithms)
The idea is to make the user interface and algorithm registration API
@@ -44,22 +43,27 @@ under development.
Here's an example of how to use the API:
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
struct scatterlist sg[2];
char result[128];
struct crypto_tfm *tfm;
struct crypto_hash *tfm;
struct hash_desc desc;
tfm = crypto_alloc_tfm("md5", 0);
if (tfm == NULL)
tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
fail();
/* ... set up the scatterlists ... */
desc.tfm = tfm;
desc.flags = 0;
crypto_digest_init(tfm);
crypto_digest_update(tfm, sg, 2);
crypto_digest_final(tfm, result);
if (crypto_hash_digest(&desc, sg, 2, result))
fail();
crypto_free_tfm(tfm);
crypto_free_hash(tfm);
Many real examples are available in the regression test module (tcrypt.c).
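For comparison, here is a minimal self-contained version of the flow above (an illustrative sketch, not part of the patch; the do_md5() wrapper and its parameters are invented for the example). Note that the last argument of crypto_hash_digest() is the total number of bytes to hash, not the number of scatterlist entries:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int do_md5(const void *data, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	/* any synchronous md5 provider will do */
	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_one(&sg, data, len);
	desc.tfm = tfm;
	desc.flags = 0;

	/* init, update and final in a single call */
	ret = crypto_hash_digest(&desc, &sg, len, out);

	crypto_free_hash(tfm);
	return ret;
}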
@@ -126,7 +130,7 @@ might already be working on.
BUGS
Send bug reports to:
James Morris <jmorris@redhat.com>
Herbert Xu <herbert@gondor.apana.org.au>
Cc: David S. Miller <davem@redhat.com>
@@ -134,13 +138,14 @@ FURTHER INFORMATION
For further patches and various updates, including the current TODO
list, see:
http://samba.org/~jamesm/crypto/
http://gondor.apana.org.au/~herbert/crypto/
AUTHORS
James Morris
David S. Miller
Herbert Xu
CREDITS
@@ -238,8 +243,11 @@ Anubis algorithm contributors:
Tiger algorithm contributors:
Aaron Grothe
VIA PadLock contributors:
Michal Ludvig
Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>
Please send any credits updates or corrections to:
James Morris <jmorris@redhat.com>
Herbert Xu <herbert@gondor.apana.org.au>

View File

@@ -5,5 +5,8 @@
#
obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
aes-i586-y := aes-i586-asm.o aes.o
twofish-i586-y := twofish-i586-asm.o twofish.o

View File

@@ -379,12 +379,13 @@ static void gen_tabs(void)
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
int i;
u32 ss[8];
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
u32 *flags = &tfm->crt_flags;
/* encryption schedule */

View File

@@ -0,0 +1,335 @@
/***************************************************************************
* Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
.file "twofish-i586-asm.S"
.text
#include <asm/asm-offsets.h>
/* return address at 0 */
#define in_blk 12 /* input byte array address parameter*/
#define out_blk 8 /* output byte array address parameter*/
#define tfm 4 /* Twofish context structure */
#define a_offset 0
#define b_offset 4
#define c_offset 8
#define d_offset 12
/* Structure of the crypto context struct*/
#define s0 0 /* S0 Array 256 Words each */
#define s1 1024 /* S1 Array */
#define s2 2048 /* S2 Array */
#define s3 3072 /* S3 Array */
#define w 4096 /* 8 whitening keys (word) */
#define k 4128 /* key 1-32 ( word ) */
/* define a few register aliases to allow macro substitution */
#define R0D %eax
#define R0B %al
#define R0H %ah
#define R1D %ebx
#define R1B %bl
#define R1H %bh
#define R2D %ecx
#define R2B %cl
#define R2H %ch
#define R3D %edx
#define R3B %dl
#define R3H %dh
/* performs input whitening */
#define input_whitening(src,context,offset)\
xor w+offset(context), src;
/* performs output whitening */
#define output_whitening(src,context,offset)\
xor w+16+offset(context), src;
/*
* a input register containing a (rotated 16)
* b input register containing b
* c input register containing c
* d input register containing d (already rol $1)
* operations on a and b are interleaved to increase performance
*/
#define encrypt_round(a,b,c,d,round)\
push d ## D;\
movzx b ## B, %edi;\
mov s1(%ebp,%edi,4),d ## D;\
movzx a ## B, %edi;\
mov s2(%ebp,%edi,4),%esi;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor s2(%ebp,%edi,4),d ## D;\
movzx a ## H, %edi;\
ror $16, a ## D;\
xor s3(%ebp,%edi,4),%esi;\
movzx b ## B, %edi;\
xor s3(%ebp,%edi,4),d ## D;\
movzx a ## B, %edi;\
xor (%ebp,%edi,4), %esi;\
movzx b ## H, %edi;\
ror $15, b ## D;\
xor (%ebp,%edi,4), d ## D;\
movzx a ## H, %edi;\
xor s1(%ebp,%edi,4),%esi;\
pop %edi;\
add d ## D, %esi;\
add %esi, d ## D;\
add k+round(%ebp), %esi;\
xor %esi, c ## D;\
rol $15, c ## D;\
add k+4+round(%ebp),d ## D;\
xor %edi, d ## D;
/*
* a input register containing a (rotated 16)
* b input register containing b
* c input register containing c
* d input register containing d (already rol $1)
* operations on a and b are interleaved to increase performance
* last round has different rotations for the output preparation
*/
#define encrypt_last_round(a,b,c,d,round)\
push d ## D;\
movzx b ## B, %edi;\
mov s1(%ebp,%edi,4),d ## D;\
movzx a ## B, %edi;\
mov s2(%ebp,%edi,4),%esi;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor s2(%ebp,%edi,4),d ## D;\
movzx a ## H, %edi;\
ror $16, a ## D;\
xor s3(%ebp,%edi,4),%esi;\
movzx b ## B, %edi;\
xor s3(%ebp,%edi,4),d ## D;\
movzx a ## B, %edi;\
xor (%ebp,%edi,4), %esi;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor (%ebp,%edi,4), d ## D;\
movzx a ## H, %edi;\
xor s1(%ebp,%edi,4),%esi;\
pop %edi;\
add d ## D, %esi;\
add %esi, d ## D;\
add k+round(%ebp), %esi;\
xor %esi, c ## D;\
ror $1, c ## D;\
add k+4+round(%ebp),d ## D;\
xor %edi, d ## D;
/*
* a input register containing a
* b input register containing b (rotated 16)
* c input register containing c
* d input register containing d (already rol $1)
* operations on a and b are interleaved to increase performance
*/
#define decrypt_round(a,b,c,d,round)\
push c ## D;\
movzx a ## B, %edi;\
mov (%ebp,%edi,4), c ## D;\
movzx b ## B, %edi;\
mov s3(%ebp,%edi,4),%esi;\
movzx a ## H, %edi;\
ror $16, a ## D;\
xor s1(%ebp,%edi,4),c ## D;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor (%ebp,%edi,4), %esi;\
movzx a ## B, %edi;\
xor s2(%ebp,%edi,4),c ## D;\
movzx b ## B, %edi;\
xor s1(%ebp,%edi,4),%esi;\
movzx a ## H, %edi;\
ror $15, a ## D;\
xor s3(%ebp,%edi,4),c ## D;\
movzx b ## H, %edi;\
xor s2(%ebp,%edi,4),%esi;\
pop %edi;\
add %esi, c ## D;\
add c ## D, %esi;\
add k+round(%ebp), c ## D;\
xor %edi, c ## D;\
add k+4+round(%ebp),%esi;\
xor %esi, d ## D;\
rol $15, d ## D;
/*
* a input register containing a
* b input register containing b (rotated 16)
* c input register containing c
* d input register containing d (already rol $1)
* operations on a and b are interleaved to increase performance
* last round has different rotations for the output preparation
*/
#define decrypt_last_round(a,b,c,d,round)\
push c ## D;\
movzx a ## B, %edi;\
mov (%ebp,%edi,4), c ## D;\
movzx b ## B, %edi;\
mov s3(%ebp,%edi,4),%esi;\
movzx a ## H, %edi;\
ror $16, a ## D;\
xor s1(%ebp,%edi,4),c ## D;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor (%ebp,%edi,4), %esi;\
movzx a ## B, %edi;\
xor s2(%ebp,%edi,4),c ## D;\
movzx b ## B, %edi;\
xor s1(%ebp,%edi,4),%esi;\
movzx a ## H, %edi;\
ror $16, a ## D;\
xor s3(%ebp,%edi,4),c ## D;\
movzx b ## H, %edi;\
xor s2(%ebp,%edi,4),%esi;\
pop %edi;\
add %esi, c ## D;\
add c ## D, %esi;\
add k+round(%ebp), c ## D;\
xor %edi, c ## D;\
add k+4+round(%ebp),%esi;\
xor %esi, d ## D;\
ror $1, d ## D;
.align 4
.global twofish_enc_blk
.global twofish_dec_blk
twofish_enc_blk:
push %ebp /* save registers according to calling convention*/
push %ebx
push %esi
push %edi
mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base pointer to the crypto tfm */
add $crypto_tfm_ctx_offset, %ebp /* ctx address */
mov in_blk+16(%esp),%edi /* input address in edi */
mov (%edi), %eax
mov b_offset(%edi), %ebx
mov c_offset(%edi), %ecx
mov d_offset(%edi), %edx
input_whitening(%eax,%ebp,a_offset)
ror $16, %eax
input_whitening(%ebx,%ebp,b_offset)
input_whitening(%ecx,%ebp,c_offset)
input_whitening(%edx,%ebp,d_offset)
rol $1, %edx
encrypt_round(R0,R1,R2,R3,0);
encrypt_round(R2,R3,R0,R1,8);
encrypt_round(R0,R1,R2,R3,2*8);
encrypt_round(R2,R3,R0,R1,3*8);
encrypt_round(R0,R1,R2,R3,4*8);
encrypt_round(R2,R3,R0,R1,5*8);
encrypt_round(R0,R1,R2,R3,6*8);
encrypt_round(R2,R3,R0,R1,7*8);
encrypt_round(R0,R1,R2,R3,8*8);
encrypt_round(R2,R3,R0,R1,9*8);
encrypt_round(R0,R1,R2,R3,10*8);
encrypt_round(R2,R3,R0,R1,11*8);
encrypt_round(R0,R1,R2,R3,12*8);
encrypt_round(R2,R3,R0,R1,13*8);
encrypt_round(R0,R1,R2,R3,14*8);
encrypt_last_round(R2,R3,R0,R1,15*8);
output_whitening(%eax,%ebp,c_offset)
output_whitening(%ebx,%ebp,d_offset)
output_whitening(%ecx,%ebp,a_offset)
output_whitening(%edx,%ebp,b_offset)
mov out_blk+16(%esp),%edi;
mov %eax, c_offset(%edi)
mov %ebx, d_offset(%edi)
mov %ecx, (%edi)
mov %edx, b_offset(%edi)
pop %edi
pop %esi
pop %ebx
pop %ebp
mov $1, %eax
ret
twofish_dec_blk:
push %ebp /* save registers according to calling convention*/
push %ebx
push %esi
push %edi
mov tfm + 16(%esp), %ebp /* abuse the base pointer: set new base pointer to the crypto tfm */
add $crypto_tfm_ctx_offset, %ebp /* ctx address */
mov in_blk+16(%esp),%edi /* input address in edi */
mov (%edi), %eax
mov b_offset(%edi), %ebx
mov c_offset(%edi), %ecx
mov d_offset(%edi), %edx
output_whitening(%eax,%ebp,a_offset)
output_whitening(%ebx,%ebp,b_offset)
ror $16, %ebx
output_whitening(%ecx,%ebp,c_offset)
output_whitening(%edx,%ebp,d_offset)
rol $1, %ecx
decrypt_round(R0,R1,R2,R3,15*8);
decrypt_round(R2,R3,R0,R1,14*8);
decrypt_round(R0,R1,R2,R3,13*8);
decrypt_round(R2,R3,R0,R1,12*8);
decrypt_round(R0,R1,R2,R3,11*8);
decrypt_round(R2,R3,R0,R1,10*8);
decrypt_round(R0,R1,R2,R3,9*8);
decrypt_round(R2,R3,R0,R1,8*8);
decrypt_round(R0,R1,R2,R3,7*8);
decrypt_round(R2,R3,R0,R1,6*8);
decrypt_round(R0,R1,R2,R3,5*8);
decrypt_round(R2,R3,R0,R1,4*8);
decrypt_round(R0,R1,R2,R3,3*8);
decrypt_round(R2,R3,R0,R1,2*8);
decrypt_round(R0,R1,R2,R3,1*8);
decrypt_last_round(R2,R3,R0,R1,0);
input_whitening(%eax,%ebp,c_offset)
input_whitening(%ebx,%ebp,d_offset)
input_whitening(%ecx,%ebp,a_offset)
input_whitening(%edx,%ebp,b_offset)
mov out_blk+16(%esp),%edi;
mov %eax, c_offset(%edi)
mov %ebx, d_offset(%edi)
mov %ecx, (%edi)
mov %edx, b_offset(%edi)
pop %edi
pop %esi
pop %ebx
pop %ebp
mov $1, %eax
ret

View File

@@ -0,0 +1,97 @@
/*
* Glue Code for optimized 586 assembler version of TWOFISH
*
* Originally Twofish for GPG
* By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
* 256-bit key length added March 20, 1999
* Some modifications to reduce the text size by Werner Koch, April, 1998
* Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
* Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
*
* The original author has disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
* through http://www.counterpane.com/twofish.html
*
* For background information on multiplication in finite fields, used for
* the matrix operations in the key schedule, see the book _Contemporary
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
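/* A side note on the finite-field remark above: multiplication in GF(2^8)
 * is a shift-and-xor product reduced modulo a fixed primitive polynomial.
 * The sketch below is generic and only illustrative (not from this patch);
 * the 0x69/0x4D constants are my reading of the Twofish MDS and RS
 * polynomials with the x^8 term dropped.
 *
 * static u8 gf_mul(u8 a, u8 b, u8 poly)
 * {
 *	u8 r = 0;
 *
 *	while (b) {
 *		if (b & 1)
 *			r ^= a;		// add (xor) the current multiple
 *		b >>= 1;
 *		// shift, reducing by the polynomial on overflow of x^7
 *		a = (a << 1) ^ ((a & 0x80) ? poly : 0);
 *	}
 *	return r;
 * }
 *
 * e.g. gf_mul(x, y, 0x69) for the MDS matrix, gf_mul(x, y, 0x4D) for RS.
 */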
#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_enc_blk(tfm, dst, src);
}
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_dec_blk(tfm, dst, src);
}
static struct crypto_alg alg = {
.cra_name = "twofish",
.cra_driver_name = "twofish-i586",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,
.cia_setkey = twofish_setkey,
.cia_encrypt = twofish_encrypt,
.cia_decrypt = twofish_decrypt
}
}
};
static int __init init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, i586 asm optimized");
MODULE_ALIAS("twofish");

View File

@@ -16,9 +16,9 @@
*
*/
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include "crypt_s390.h"
#define AES_MIN_KEY_SIZE 16
@@ -34,13 +34,16 @@ int has_aes_256 = 0;
struct s390_aes_ctx {
u8 iv[AES_BLOCK_SIZE];
u8 key[AES_MAX_KEY_SIZE];
long enc;
long dec;
int key_len;
};
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
switch (key_len) {
case 16:
@@ -110,117 +113,11 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
}
}
static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(AES_BLOCK_SIZE - 1);
switch (sctx->key_len) {
case 16:
ret = crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
case 24:
ret = crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
case 32:
ret = crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
}
return nbytes;
}
static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(AES_BLOCK_SIZE - 1);
switch (sctx->key_len) {
case 16:
ret = crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
case 24:
ret = crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
case 32:
ret = crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
}
return nbytes;
}
static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(AES_BLOCK_SIZE - 1);
memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
switch (sctx->key_len) {
case 16:
ret = crypt_s390_kmc(KMC_AES_128_ENCRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
case 24:
ret = crypt_s390_kmc(KMC_AES_192_ENCRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
case 32:
ret = crypt_s390_kmc(KMC_AES_256_ENCRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
}
memcpy(desc->info, &sctx->iv, AES_BLOCK_SIZE);
return nbytes;
}
static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(AES_BLOCK_SIZE - 1);
memcpy(&sctx->iv, desc->info, AES_BLOCK_SIZE);
switch (sctx->key_len) {
case 16:
ret = crypt_s390_kmc(KMC_AES_128_DECRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
case 24:
ret = crypt_s390_kmc(KMC_AES_192_DECRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
case 32:
ret = crypt_s390_kmc(KMC_AES_256_DECRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
break;
}
return nbytes;
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
@@ -233,10 +130,189 @@ static struct crypto_alg aes_alg = {
.cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt,
.cia_encrypt_ecb = aes_encrypt_ecb,
.cia_decrypt_ecb = aes_decrypt_ecb,
.cia_encrypt_cbc = aes_encrypt_cbc,
.cia_decrypt_cbc = aes_decrypt_cbc,
}
}
};
static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
switch (key_len) {
case 16:
sctx->enc = KM_AES_128_ENCRYPT;
sctx->dec = KM_AES_128_DECRYPT;
break;
case 24:
sctx->enc = KM_AES_192_ENCRYPT;
sctx->dec = KM_AES_192_DECRYPT;
break;
case 32:
sctx->enc = KM_AES_256_ENCRYPT;
sctx->dec = KM_AES_256_DECRYPT;
break;
}
return aes_set_key(tfm, in_key, key_len);
}
static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes;
while ((nbytes = walk->nbytes)) {
/* only use complete blocks */
unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
BUG_ON((ret < 0) || (ret != n));
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
}
return ret;
}
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
}
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
}
static struct crypto_alg ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = ecb_aes_set_key,
.encrypt = ecb_aes_encrypt,
.decrypt = ecb_aes_decrypt,
}
}
};
static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
switch (key_len) {
case 16:
sctx->enc = KMC_AES_128_ENCRYPT;
sctx->dec = KMC_AES_128_DECRYPT;
break;
case 24:
sctx->enc = KMC_AES_192_ENCRYPT;
sctx->dec = KMC_AES_192_DECRYPT;
break;
case 32:
sctx->enc = KMC_AES_256_ENCRYPT;
sctx->dec = KMC_AES_256_DECRYPT;
break;
}
return aes_set_key(tfm, in_key, key_len);
}
static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
if (!nbytes)
goto out;
memcpy(param, walk->iv, AES_BLOCK_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, param, out, in, n);
BUG_ON((ret < 0) || (ret != n));
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
memcpy(walk->iv, param, AES_BLOCK_SIZE);
out:
return ret;
}
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
}
static struct crypto_alg cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_aes_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = cbc_aes_set_key,
.encrypt = cbc_aes_encrypt,
.decrypt = cbc_aes_decrypt,
}
}
};
@@ -256,13 +332,40 @@ static int __init aes_init(void)
return -ENOSYS;
ret = crypto_register_alg(&aes_alg);
if (ret != 0)
printk(KERN_INFO "crypt_s390: aes_s390 couldn't be loaded.\n");
if (ret != 0) {
printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n");
goto aes_err;
}
ret = crypto_register_alg(&ecb_aes_alg);
if (ret != 0) {
printk(KERN_INFO
"crypt_s390: ecb-aes-s390 couldn't be loaded.\n");
goto ecb_aes_err;
}
ret = crypto_register_alg(&cbc_aes_alg);
if (ret != 0) {
printk(KERN_INFO
"crypt_s390: cbc-aes-s390 couldn't be loaded.\n");
goto cbc_aes_err;
}
out:
return ret;
cbc_aes_err:
crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
crypto_unregister_alg(&aes_alg);
aes_err:
goto out;
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&cbc_aes_alg);
crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
}

View File

@@ -20,6 +20,9 @@
#define CRYPT_S390_OP_MASK 0xFF00
#define CRYPT_S390_FUNC_MASK 0x00FF
#define CRYPT_S390_PRIORITY 300
#define CRYPT_S390_COMPOSITE_PRIORITY 400
/* s390 cryptographic operations */
enum crypt_s390_operations {
CRYPT_S390_KM = 0x0100,

View File

@@ -13,9 +13,10 @@
* (at your option) any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include "crypt_s390.h"
#include "crypto_des.h"
@@ -45,9 +46,10 @@ struct crypt_s390_des3_192_ctx {
};
static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
struct crypt_s390_des_ctx *dctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
int ret;
/* test if key is valid (not a weak key) */
@@ -71,69 +73,10 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
crypt_s390_km(KM_DEA_DECRYPT, dctx->key, out, in, DES_BLOCK_SIZE);
}
static unsigned int des_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES_BLOCK_SIZE - 1);
ret = crypt_s390_km(KM_DEA_ENCRYPT, sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static unsigned int des_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES_BLOCK_SIZE - 1);
ret = crypt_s390_km(KM_DEA_DECRYPT, sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static unsigned int des_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES_BLOCK_SIZE - 1);
memcpy(sctx->iv, desc->info, DES_BLOCK_SIZE);
ret = crypt_s390_kmc(KMC_DEA_ENCRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
memcpy(desc->info, sctx->iv, DES_BLOCK_SIZE);
return nbytes;
}
static unsigned int des_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES_BLOCK_SIZE - 1);
memcpy(&sctx->iv, desc->info, DES_BLOCK_SIZE);
ret = crypt_s390_kmc(KMC_DEA_DECRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static struct crypto_alg des_alg = {
.cra_name = "des",
.cra_driver_name = "des-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
@@ -146,10 +89,143 @@ static struct crypto_alg des_alg = {
.cia_setkey = des_setkey,
.cia_encrypt = des_encrypt,
.cia_decrypt = des_decrypt,
.cia_encrypt_ecb = des_encrypt_ecb,
.cia_decrypt_ecb = des_decrypt_ecb,
.cia_encrypt_cbc = des_encrypt_cbc,
.cia_decrypt_cbc = des_decrypt_cbc,
}
}
};
static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
void *param, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes;
while ((nbytes = walk->nbytes)) {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_km(func, param, out, in, n);
BUG_ON((ret < 0) || (ret != n));
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
}
return ret;
}
static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
void *param, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
if (!nbytes)
goto out;
memcpy(param, walk->iv, DES_BLOCK_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
ret = crypt_s390_kmc(func, param, out, in, n);
BUG_ON((ret < 0) || (ret != n));
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
memcpy(walk->iv, param, DES_BLOCK_SIZE);
out:
return ret;
}
static int ecb_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, sctx->key, &walk);
}
static int ecb_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_DEA_DECRYPT, sctx->key, &walk);
}
static struct crypto_alg ecb_des_alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "ecb-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = des_setkey,
.encrypt = ecb_des_encrypt,
.decrypt = ecb_des_decrypt,
}
}
};
static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, sctx->iv, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct crypt_s390_des_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, sctx->iv, &walk);
}
static struct crypto_alg cbc_des_alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "cbc-des-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
.setkey = des_setkey,
.encrypt = cbc_des_encrypt,
.decrypt = cbc_des_decrypt,
}
}
};
@@ -167,11 +243,12 @@ static struct crypto_alg des_alg = {
*
*/
static int des3_128_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
int i, ret;
struct crypt_s390_des3_128_ctx *dctx = crypto_tfm_ctx(tfm);
const u8* temp_key = key;
const u8 *temp_key = key;
u32 *flags = &tfm->crt_flags;
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE))) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
@@ -202,73 +279,10 @@ static void des3_128_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
DES3_128_BLOCK_SIZE);
}
static unsigned int des3_128_encrypt_ecb(const struct cipher_desc *desc,
u8 *out, const u8 *in,
unsigned int nbytes)
{
struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
ret = crypt_s390_km(KM_TDEA_128_ENCRYPT, sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static unsigned int des3_128_decrypt_ecb(const struct cipher_desc *desc,
u8 *out, const u8 *in,
unsigned int nbytes)
{
struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
ret = crypt_s390_km(KM_TDEA_128_DECRYPT, sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static unsigned int des3_128_encrypt_cbc(const struct cipher_desc *desc,
u8 *out, const u8 *in,
unsigned int nbytes)
{
struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
memcpy(sctx->iv, desc->info, DES3_128_BLOCK_SIZE);
ret = crypt_s390_kmc(KMC_TDEA_128_ENCRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
memcpy(desc->info, sctx->iv, DES3_128_BLOCK_SIZE);
return nbytes;
}
static unsigned int des3_128_decrypt_cbc(const struct cipher_desc *desc,
u8 *out, const u8 *in,
unsigned int nbytes)
{
struct crypt_s390_des3_128_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES3_128_BLOCK_SIZE - 1);
memcpy(&sctx->iv, desc->info, DES3_128_BLOCK_SIZE);
ret = crypt_s390_kmc(KMC_TDEA_128_DECRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static struct crypto_alg des3_128_alg = {
.cra_name = "des3_ede128",
.cra_driver_name = "des3_ede128-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_128_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
@@ -281,10 +295,95 @@ static struct crypto_alg des3_128_alg = {
.cia_setkey = des3_128_setkey,
.cia_encrypt = des3_128_encrypt,
.cia_decrypt = des3_128_decrypt,
.cia_encrypt_ecb = des3_128_encrypt_ecb,
.cia_decrypt_ecb = des3_128_decrypt_ecb,
.cia_encrypt_cbc = des3_128_encrypt_cbc,
.cia_decrypt_cbc = des3_128_decrypt_cbc,
}
}
};
static int ecb_des3_128_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_128_ENCRYPT, sctx->key, &walk);
}
static int ecb_des3_128_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_128_DECRYPT, sctx->key, &walk);
}
static struct crypto_alg ecb_des3_128_alg = {
.cra_name = "ecb(des3_ede128)",
.cra_driver_name = "ecb-des3_ede128-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES3_128_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
ecb_des3_128_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_128_KEY_SIZE,
.max_keysize = DES3_128_KEY_SIZE,
.setkey = des3_128_setkey,
.encrypt = ecb_des3_128_encrypt,
.decrypt = ecb_des3_128_decrypt,
}
}
};
static int cbc_des3_128_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_128_ENCRYPT, sctx->iv, &walk);
}
static int cbc_des3_128_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_128_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_128_DECRYPT, sctx->iv, &walk);
}
static struct crypto_alg cbc_des3_128_alg = {
.cra_name = "cbc(des3_ede128)",
.cra_driver_name = "cbc-des3_ede128-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES3_128_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_128_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
cbc_des3_128_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_128_KEY_SIZE,
.max_keysize = DES3_128_KEY_SIZE,
.ivsize = DES3_128_BLOCK_SIZE,
.setkey = des3_128_setkey,
.encrypt = cbc_des3_128_encrypt,
.decrypt = cbc_des3_128_decrypt,
}
}
};
@@ -303,11 +402,12 @@ static struct crypto_alg des3_128_alg = {
*
*/
static int des3_192_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
int i, ret;
struct crypt_s390_des3_192_ctx *dctx = crypto_tfm_ctx(tfm);
const u8* temp_key = key;
const u8 *temp_key = key;
u32 *flags = &tfm->crt_flags;
if (!(memcmp(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
memcmp(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
@@ -341,73 +441,10 @@ static void des3_192_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
DES3_192_BLOCK_SIZE);
}
static unsigned int des3_192_encrypt_ecb(const struct cipher_desc *desc,
u8 *out, const u8 *in,
unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
ret = crypt_s390_km(KM_TDEA_192_ENCRYPT, sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static unsigned int des3_192_decrypt_ecb(const struct cipher_desc *desc,
u8 *out, const u8 *in,
unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
ret = crypt_s390_km(KM_TDEA_192_DECRYPT, sctx->key, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static unsigned int des3_192_encrypt_cbc(const struct cipher_desc *desc,
u8 *out, const u8 *in,
unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
memcpy(sctx->iv, desc->info, DES3_192_BLOCK_SIZE);
ret = crypt_s390_kmc(KMC_TDEA_192_ENCRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
memcpy(desc->info, sctx->iv, DES3_192_BLOCK_SIZE);
return nbytes;
}
static unsigned int des3_192_decrypt_cbc(const struct cipher_desc *desc,
u8 *out, const u8 *in,
unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_tfm_ctx(desc->tfm);
int ret;
/* only use complete blocks */
nbytes &= ~(DES3_192_BLOCK_SIZE - 1);
memcpy(&sctx->iv, desc->info, DES3_192_BLOCK_SIZE);
ret = crypt_s390_kmc(KMC_TDEA_192_DECRYPT, &sctx->iv, out, in, nbytes);
BUG_ON((ret < 0) || (ret != nbytes));
return nbytes;
}
static struct crypto_alg des3_192_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "des3_ede-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_192_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
@@ -420,10 +457,95 @@ static struct crypto_alg des3_192_alg = {
.cia_setkey = des3_192_setkey,
.cia_encrypt = des3_192_encrypt,
.cia_decrypt = des3_192_decrypt,
.cia_encrypt_ecb = des3_192_encrypt_ecb,
.cia_decrypt_ecb = des3_192_decrypt_ecb,
.cia_encrypt_cbc = des3_192_encrypt_cbc,
.cia_decrypt_cbc = des3_192_decrypt_cbc,
}
}
};
static int ecb_des3_192_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, sctx->key, &walk);
}
static int ecb_des3_192_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, sctx->key, &walk);
}
static struct crypto_alg ecb_des3_192_alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "ecb-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES3_192_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
ecb_des3_192_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_192_KEY_SIZE,
.max_keysize = DES3_192_KEY_SIZE,
.setkey = des3_192_setkey,
.encrypt = ecb_des3_192_encrypt,
.decrypt = ecb_des3_192_decrypt,
}
}
};
static int cbc_des3_192_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, sctx->iv, &walk);
}
static int cbc_des3_192_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct crypt_s390_des3_192_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, sctx->iv, &walk);
}
static struct crypto_alg cbc_des3_192_alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "cbc-des3_ede-s390",
.cra_priority = CRYPT_S390_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = DES3_192_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_des3_192_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(
cbc_des3_192_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = DES3_192_KEY_SIZE,
.max_keysize = DES3_192_KEY_SIZE,
.ivsize = DES3_192_BLOCK_SIZE,
.setkey = des3_192_setkey,
.encrypt = cbc_des3_192_encrypt,
.decrypt = cbc_des3_192_decrypt,
}
}
};
@@ -437,22 +559,69 @@ static int init(void)
!crypt_s390_func_available(KM_TDEA_192_ENCRYPT))
return -ENOSYS;
ret |= (crypto_register_alg(&des_alg) == 0) ? 0:1;
ret |= (crypto_register_alg(&des3_128_alg) == 0) ? 0:2;
ret |= (crypto_register_alg(&des3_192_alg) == 0) ? 0:4;
if (ret) {
crypto_unregister_alg(&des3_192_alg);
crypto_unregister_alg(&des3_128_alg);
crypto_unregister_alg(&des_alg);
return -EEXIST;
}
return 0;
ret = crypto_register_alg(&des_alg);
if (ret)
goto des_err;
ret = crypto_register_alg(&ecb_des_alg);
if (ret)
goto ecb_des_err;
ret = crypto_register_alg(&cbc_des_alg);
if (ret)
goto cbc_des_err;
ret = crypto_register_alg(&des3_128_alg);
if (ret)
goto des3_128_err;
ret = crypto_register_alg(&ecb_des3_128_alg);
if (ret)
goto ecb_des3_128_err;
ret = crypto_register_alg(&cbc_des3_128_alg);
if (ret)
goto cbc_des3_128_err;
ret = crypto_register_alg(&des3_192_alg);
if (ret)
goto des3_192_err;
ret = crypto_register_alg(&ecb_des3_192_alg);
if (ret)
goto ecb_des3_192_err;
ret = crypto_register_alg(&cbc_des3_192_alg);
if (ret)
goto cbc_des3_192_err;
out:
return ret;
cbc_des3_192_err:
crypto_unregister_alg(&ecb_des3_192_alg);
ecb_des3_192_err:
crypto_unregister_alg(&des3_192_alg);
des3_192_err:
crypto_unregister_alg(&cbc_des3_128_alg);
cbc_des3_128_err:
crypto_unregister_alg(&ecb_des3_128_alg);
ecb_des3_128_err:
crypto_unregister_alg(&des3_128_alg);
des3_128_err:
crypto_unregister_alg(&cbc_des_alg);
cbc_des_err:
crypto_unregister_alg(&ecb_des_alg);
ecb_des_err:
crypto_unregister_alg(&des_alg);
des_err:
goto out;
}
static void __exit fini(void)
{
crypto_unregister_alg(&cbc_des3_192_alg);
crypto_unregister_alg(&ecb_des3_192_alg);
crypto_unregister_alg(&des3_192_alg);
crypto_unregister_alg(&cbc_des3_128_alg);
crypto_unregister_alg(&ecb_des3_128_alg);
crypto_unregister_alg(&des3_128_alg);
crypto_unregister_alg(&cbc_des_alg);
crypto_unregister_alg(&ecb_des_alg);
crypto_unregister_alg(&des_alg);
}

View File

@@ -126,6 +126,8 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
static struct crypto_alg alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx),

View File

@@ -127,6 +127,8 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
static struct crypto_alg alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-s390",
.cra_priority = CRYPT_S390_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s390_sha256_ctx),

View File

@@ -5,5 +5,8 @@
#
obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
aes-x86_64-y := aes-x86_64-asm.o aes.o
twofish-x86_64-y := twofish-x86_64-asm.o twofish.o

View File

@@ -228,13 +228,14 @@ static void __init gen_tabs(void)
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
u32 *flags = &tfm->crt_flags;
u32 i, j, t, u, v, w;
if (key_len != 16 && key_len != 24 && key_len != 32) {
if (key_len % 8) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}

View File

@@ -0,0 +1,324 @@
/***************************************************************************
* Copyright (C) 2006 by Joachim Fritschi, <jfritschi@freenet.de> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
.file "twofish-x86_64-asm.S"
.text
#include <asm/asm-offsets.h>
#define a_offset 0
#define b_offset 4
#define c_offset 8
#define d_offset 12
/* Structure of the crypto context struct*/
#define s0 0 /* S0 Array 256 Words each */
#define s1 1024 /* S1 Array */
#define s2 2048 /* S2 Array */
#define s3 3072 /* S3 Array */
#define w 4096 /* 8 whitening keys (word) */
#define k 4128 /* key 1-32 ( word ) */
/* define a few register aliases to allow macro substitution */
#define R0 %rax
#define R0D %eax
#define R0B %al
#define R0H %ah
#define R1 %rbx
#define R1D %ebx
#define R1B %bl
#define R1H %bh
#define R2 %rcx
#define R2D %ecx
#define R2B %cl
#define R2H %ch
#define R3 %rdx
#define R3D %edx
#define R3B %dl
#define R3H %dh
/* performs input whitening */
#define input_whitening(src,context,offset)\
xor w+offset(context), src;
/* performs output whitening */
#define output_whitening(src,context,offset)\
xor w+16+offset(context), src;
/*
* a input register containing a (rotated 16)
* b input register containing b
* c input register containing c
* d input register containing d (already rol $1)
* operations on a and b are interleaved to increase performance
*/
#define encrypt_round(a,b,c,d,round)\
movzx b ## B, %edi;\
mov s1(%r11,%rdi,4),%r8d;\
movzx a ## B, %edi;\
mov s2(%r11,%rdi,4),%r9d;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor s2(%r11,%rdi,4),%r8d;\
movzx a ## H, %edi;\
ror $16, a ## D;\
xor s3(%r11,%rdi,4),%r9d;\
movzx b ## B, %edi;\
xor s3(%r11,%rdi,4),%r8d;\
movzx a ## B, %edi;\
xor (%r11,%rdi,4), %r9d;\
movzx b ## H, %edi;\
ror $15, b ## D;\
xor (%r11,%rdi,4), %r8d;\
movzx a ## H, %edi;\
xor s1(%r11,%rdi,4),%r9d;\
add %r8d, %r9d;\
add %r9d, %r8d;\
add k+round(%r11), %r9d;\
xor %r9d, c ## D;\
rol $15, c ## D;\
add k+4+round(%r11),%r8d;\
xor %r8d, d ## D;
/*
* a input register containing a (rotated 16)
* b input register containing b
* c input register containing c
* d input register containing d (already rol $1)
* operations on a and b are interleaved to increase performance
* during the round a and b are prepared for the output whitening
*/
#define encrypt_last_round(a,b,c,d,round)\
mov b ## D, %r10d;\
shl $32, %r10;\
movzx b ## B, %edi;\
mov s1(%r11,%rdi,4),%r8d;\
movzx a ## B, %edi;\
mov s2(%r11,%rdi,4),%r9d;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor s2(%r11,%rdi,4),%r8d;\
movzx a ## H, %edi;\
ror $16, a ## D;\
xor s3(%r11,%rdi,4),%r9d;\
movzx b ## B, %edi;\
xor s3(%r11,%rdi,4),%r8d;\
movzx a ## B, %edi;\
xor (%r11,%rdi,4), %r9d;\
xor a, %r10;\
movzx b ## H, %edi;\
xor (%r11,%rdi,4), %r8d;\
movzx a ## H, %edi;\
xor s1(%r11,%rdi,4),%r9d;\
add %r8d, %r9d;\
add %r9d, %r8d;\
add k+round(%r11), %r9d;\
xor %r9d, c ## D;\
ror $1, c ## D;\
add k+4+round(%r11),%r8d;\
xor %r8d, d ## D
/*
* a input register containing a
* b input register containing b (rotated 16)
* c input register containing c (already rol $1)
* d input register containing d
* operations on a and b are interleaved to increase performance
*/
#define decrypt_round(a,b,c,d,round)\
movzx a ## B, %edi;\
mov (%r11,%rdi,4), %r9d;\
movzx b ## B, %edi;\
mov s3(%r11,%rdi,4),%r8d;\
movzx a ## H, %edi;\
ror $16, a ## D;\
xor s1(%r11,%rdi,4),%r9d;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor (%r11,%rdi,4), %r8d;\
movzx a ## B, %edi;\
xor s2(%r11,%rdi,4),%r9d;\
movzx b ## B, %edi;\
xor s1(%r11,%rdi,4),%r8d;\
movzx a ## H, %edi;\
ror $15, a ## D;\
xor s3(%r11,%rdi,4),%r9d;\
movzx b ## H, %edi;\
xor s2(%r11,%rdi,4),%r8d;\
add %r8d, %r9d;\
add %r9d, %r8d;\
add k+round(%r11), %r9d;\
xor %r9d, c ## D;\
add k+4+round(%r11),%r8d;\
xor %r8d, d ## D;\
rol $15, d ## D;
/*
* a input register containing a
* b input register containing b
* c input register containing c (already rol $1)
* d input register containing d
* operations on a and b are interleaved to increase performance
* during the round a and b are prepared for the output whitening
*/
#define decrypt_last_round(a,b,c,d,round)\
movzx a ## B, %edi;\
mov (%r11,%rdi,4), %r9d;\
movzx b ## B, %edi;\
mov s3(%r11,%rdi,4),%r8d;\
movzx b ## H, %edi;\
ror $16, b ## D;\
xor (%r11,%rdi,4), %r8d;\
movzx a ## H, %edi;\
mov b ## D, %r10d;\
shl $32, %r10;\
xor a, %r10;\
ror $16, a ## D;\
xor s1(%r11,%rdi,4),%r9d;\
movzx b ## B, %edi;\
xor s1(%r11,%rdi,4),%r8d;\
movzx a ## B, %edi;\
xor s2(%r11,%rdi,4),%r9d;\
movzx b ## H, %edi;\
xor s2(%r11,%rdi,4),%r8d;\
movzx a ## H, %edi;\
xor s3(%r11,%rdi,4),%r9d;\
add %r8d, %r9d;\
add %r9d, %r8d;\
add k+round(%r11), %r9d;\
xor %r9d, c ## D;\
add k+4+round(%r11),%r8d;\
xor %r8d, d ## D;\
ror $1, d ## D;
.align 8
.global twofish_enc_blk
.global twofish_dec_blk
twofish_enc_blk:
pushq R1
/* %rdi contains the crypto tfm address */
/* %rsi contains the output address */
/* %rdx contains the input address */
add $crypto_tfm_ctx_offset, %rdi /* set ctx address */
/* ctx address is moved to free one non-rex register
as target for the 8bit high operations */
mov %rdi, %r11
movq (R3), R1
movq 8(R3), R3
input_whitening(R1,%r11,a_offset)
input_whitening(R3,%r11,c_offset)
mov R1D, R0D
rol $16, R0D
shr $32, R1
mov R3D, R2D
shr $32, R3
rol $1, R3D
encrypt_round(R0,R1,R2,R3,0);
encrypt_round(R2,R3,R0,R1,8);
encrypt_round(R0,R1,R2,R3,2*8);
encrypt_round(R2,R3,R0,R1,3*8);
encrypt_round(R0,R1,R2,R3,4*8);
encrypt_round(R2,R3,R0,R1,5*8);
encrypt_round(R0,R1,R2,R3,6*8);
encrypt_round(R2,R3,R0,R1,7*8);
encrypt_round(R0,R1,R2,R3,8*8);
encrypt_round(R2,R3,R0,R1,9*8);
encrypt_round(R0,R1,R2,R3,10*8);
encrypt_round(R2,R3,R0,R1,11*8);
encrypt_round(R0,R1,R2,R3,12*8);
encrypt_round(R2,R3,R0,R1,13*8);
encrypt_round(R0,R1,R2,R3,14*8);
encrypt_last_round(R2,R3,R0,R1,15*8);
output_whitening(%r10,%r11,a_offset)
movq %r10, (%rsi)
shl $32, R1
xor R0, R1
output_whitening(R1,%r11,c_offset)
movq R1, 8(%rsi)
popq R1
movq $1,%rax
ret
twofish_dec_blk:
pushq R1
/* %rdi contains the crypto tfm address */
/* %rsi contains the output address */
/* %rdx contains the input address */
add $crypto_tfm_ctx_offset, %rdi /* set ctx address */
/* ctx address is moved to free one non-rex register
as target for the 8bit high operations */
mov %rdi, %r11
movq (R3), R1
movq 8(R3), R3
output_whitening(R1,%r11,a_offset)
output_whitening(R3,%r11,c_offset)
mov R1D, R0D
shr $32, R1
rol $16, R1D
mov R3D, R2D
shr $32, R3
rol $1, R2D
decrypt_round(R0,R1,R2,R3,15*8);
decrypt_round(R2,R3,R0,R1,14*8);
decrypt_round(R0,R1,R2,R3,13*8);
decrypt_round(R2,R3,R0,R1,12*8);
decrypt_round(R0,R1,R2,R3,11*8);
decrypt_round(R2,R3,R0,R1,10*8);
decrypt_round(R0,R1,R2,R3,9*8);
decrypt_round(R2,R3,R0,R1,8*8);
decrypt_round(R0,R1,R2,R3,7*8);
decrypt_round(R2,R3,R0,R1,6*8);
decrypt_round(R0,R1,R2,R3,5*8);
decrypt_round(R2,R3,R0,R1,4*8);
decrypt_round(R0,R1,R2,R3,3*8);
decrypt_round(R2,R3,R0,R1,2*8);
decrypt_round(R0,R1,R2,R3,1*8);
decrypt_last_round(R2,R3,R0,R1,0);
input_whitening(%r10,%r11,a_offset)
movq %r10, (%rsi)
shl $32, R1
xor R0, R1
input_whitening(R1,%r11,c_offset)
movq R1, 8(%rsi)
popq R1
movq $1,%rax
ret

View File

@@ -0,0 +1,97 @@
/*
* Glue Code for optimized x86_64 assembler version of TWOFISH
*
* Originally Twofish for GPG
* By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
* 256-bit key length added March 20, 1999
* Some modifications to reduce the text size by Werner Koch, April, 1998
* Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
* Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
*
* The original author has disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
* through http://www.counterpane.com/twofish.html
*
* For background information on multiplication in finite fields, used for
* the matrix operations in the key schedule, see the book _Contemporary
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
#include <crypto/twofish.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
asmlinkage void twofish_enc_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
asmlinkage void twofish_dec_blk(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_enc_blk(tfm, dst, src);
}
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
twofish_dec_blk(tfm, dst, src);
}
static struct crypto_alg alg = {
.cra_name = "twofish",
.cra_driver_name = "twofish-x86_64",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_u = {
.cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,
.cia_setkey = twofish_setkey,
.cia_encrypt = twofish_encrypt,
.cia_decrypt = twofish_decrypt
}
}
};
static int __init init(void)
{
return crypto_register_alg(&alg);
}
static void __exit fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm, x86_64 asm optimized");
MODULE_ALIAS("twofish");


@@ -9,47 +9,71 @@ config CRYPTO
help
This option provides the core Cryptographic API.
if CRYPTO
config CRYPTO_ALGAPI
tristate
help
This option provides the API for cryptographic algorithms.
config CRYPTO_BLKCIPHER
tristate
select CRYPTO_ALGAPI
config CRYPTO_HASH
tristate
select CRYPTO_ALGAPI
config CRYPTO_MANAGER
tristate "Cryptographic algorithm manager"
select CRYPTO_ALGAPI
default m
help
Create default cryptographic template instantiations such as
cbc(aes).
config CRYPTO_HMAC
bool "HMAC support"
depends on CRYPTO
tristate "HMAC support"
select CRYPTO_HASH
help
HMAC: Keyed-Hashing for Message Authentication (RFC2104).
This is required for IPSec.
config CRYPTO_NULL
tristate "Null algorithms"
depends on CRYPTO
select CRYPTO_ALGAPI
help
These are 'Null' algorithms, used by IPsec, which do nothing.
config CRYPTO_MD4
tristate "MD4 digest algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
MD4 message digest algorithm (RFC1320).
config CRYPTO_MD5
tristate "MD5 digest algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
MD5 message digest algorithm (RFC1321).
config CRYPTO_SHA1
tristate "SHA1 digest algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
config CRYPTO_SHA1_S390
tristate "SHA1 digest algorithm (s390)"
depends on CRYPTO && S390
depends on S390
select CRYPTO_ALGAPI
help
This is the s390 hardware accelerated implementation of the
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
config CRYPTO_SHA256
tristate "SHA256 digest algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
SHA256 secure hash standard (DFIPS 180-2).
@@ -58,7 +82,8 @@ config CRYPTO_SHA256
config CRYPTO_SHA256_S390
tristate "SHA256 digest algorithm (s390)"
depends on CRYPTO && S390
depends on S390
select CRYPTO_ALGAPI
help
This is the s390 hardware accelerated implementation of the
SHA256 secure hash standard (DFIPS 180-2).
@@ -68,7 +93,7 @@ config CRYPTO_SHA256_S390
config CRYPTO_SHA512
tristate "SHA384 and SHA512 digest algorithms"
depends on CRYPTO
select CRYPTO_ALGAPI
help
SHA512 secure hash standard (DFIPS 180-2).
@@ -80,7 +105,7 @@ config CRYPTO_SHA512
config CRYPTO_WP512
tristate "Whirlpool digest algorithms"
depends on CRYPTO
select CRYPTO_ALGAPI
help
Whirlpool hash algorithm 512, 384 and 256-bit hashes
@@ -92,7 +117,7 @@ config CRYPTO_WP512
config CRYPTO_TGR192
tristate "Tiger digest algorithms"
depends on CRYPTO
select CRYPTO_ALGAPI
help
Tiger hash algorithm 192, 160 and 128-bit hashes
@@ -103,21 +128,40 @@ config CRYPTO_TGR192
See also:
<http://www.cs.technion.ac.il/~biham/Reports/Tiger/>.
config CRYPTO_ECB
tristate "ECB support"
select CRYPTO_BLKCIPHER
default m
help
ECB: Electronic CodeBook mode
This is the simplest block cipher algorithm. It simply encrypts
the input block by block.
config CRYPTO_CBC
tristate "CBC support"
select CRYPTO_BLKCIPHER
default m
help
CBC: Cipher Block Chaining mode
This block cipher algorithm is required for IPSec.
config CRYPTO_DES
tristate "DES and Triple DES EDE cipher algorithms"
depends on CRYPTO
select CRYPTO_ALGAPI
help
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
config CRYPTO_DES_S390
tristate "DES and Triple DES cipher algorithms (s390)"
depends on CRYPTO && S390
depends on S390
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
config CRYPTO_BLOWFISH
tristate "Blowfish cipher algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
Blowfish cipher algorithm, by Bruce Schneier.
@@ -130,7 +174,8 @@ config CRYPTO_BLOWFISH
config CRYPTO_TWOFISH
tristate "Twofish cipher algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
help
Twofish cipher algorithm.
@@ -142,9 +187,47 @@ config CRYPTO_TWOFISH
See also:
<http://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_COMMON
tristate
help
Common parts of the Twofish cipher algorithm shared by the
generic C and the assembler implementations.
config CRYPTO_TWOFISH_586
tristate "Twofish cipher algorithms (i586)"
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
help
Twofish cipher algorithm.
Twofish was submitted as an AES (Advanced Encryption Standard)
candidate cipher by researchers at CounterPane Systems. It is a
16 round block cipher supporting key sizes of 128, 192, and 256
bits.
See also:
<http://www.schneier.com/twofish.html>
config CRYPTO_TWOFISH_X86_64
tristate "Twofish cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON
help
Twofish cipher algorithm (x86_64).
Twofish was submitted as an AES (Advanced Encryption Standard)
candidate cipher by researchers at CounterPane Systems. It is a
16 round block cipher supporting key sizes of 128, 192, and 256
bits.
See also:
<http://www.schneier.com/twofish.html>
config CRYPTO_SERPENT
tristate "Serpent cipher algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
Serpent cipher algorithm, by Anderson, Biham & Knudsen.
@@ -157,7 +240,7 @@ config CRYPTO_SERPENT
config CRYPTO_AES
tristate "AES cipher algorithms"
depends on CRYPTO
select CRYPTO_ALGAPI
help
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
@@ -177,7 +260,8 @@ config CRYPTO_AES
config CRYPTO_AES_586
tristate "AES cipher algorithms (i586)"
depends on CRYPTO && ((X86 || UML_X86) && !64BIT)
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_ALGAPI
help
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
@@ -197,7 +281,8 @@ config CRYPTO_AES_586
config CRYPTO_AES_X86_64
tristate "AES cipher algorithms (x86_64)"
depends on CRYPTO && ((X86 || UML_X86) && 64BIT)
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_ALGAPI
help
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
@@ -217,7 +302,9 @@ config CRYPTO_AES_X86_64
config CRYPTO_AES_S390
tristate "AES cipher algorithms (s390)"
depends on CRYPTO && S390
depends on S390
select CRYPTO_ALGAPI
select CRYPTO_BLKCIPHER
help
This is the s390 hardware accelerated implementation of the
AES cipher algorithms (FIPS-197). AES uses the Rijndael
@@ -237,21 +324,21 @@ config CRYPTO_AES_S390
config CRYPTO_CAST5
tristate "CAST5 (CAST-128) cipher algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
The CAST5 encryption algorithm (synonymous with CAST-128) is
described in RFC2144.
config CRYPTO_CAST6
tristate "CAST6 (CAST-256) cipher algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
The CAST6 encryption algorithm (synonymous with CAST-256) is
described in RFC2612.
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
depends on CRYPTO
select CRYPTO_ALGAPI
help
TEA cipher algorithm.
@@ -268,7 +355,7 @@ config CRYPTO_TEA
config CRYPTO_ARC4
tristate "ARC4 cipher algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
ARC4 cipher algorithm.
@@ -279,7 +366,7 @@ config CRYPTO_ARC4
config CRYPTO_KHAZAD
tristate "Khazad cipher algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
Khazad cipher algorithm.
@@ -292,7 +379,7 @@ config CRYPTO_KHAZAD
config CRYPTO_ANUBIS
tristate "Anubis cipher algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
Anubis cipher algorithm.
@@ -307,7 +394,7 @@ config CRYPTO_ANUBIS
config CRYPTO_DEFLATE
tristate "Deflate compression algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
select ZLIB_INFLATE
select ZLIB_DEFLATE
help
@@ -318,7 +405,7 @@ config CRYPTO_DEFLATE
config CRYPTO_MICHAEL_MIC
tristate "Michael MIC keyed digest algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
help
Michael MIC is used for message integrity protection in TKIP
(IEEE 802.11i). This algorithm is required for TKIP, but it
@@ -327,7 +414,7 @@ config CRYPTO_MICHAEL_MIC
config CRYPTO_CRC32C
tristate "CRC32c CRC algorithm"
depends on CRYPTO
select CRYPTO_ALGAPI
select LIBCRC32C
help
Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
@@ -337,10 +424,13 @@ config CRYPTO_CRC32C
config CRYPTO_TEST
tristate "Testing module"
depends on CRYPTO && m
depends on m
select CRYPTO_ALGAPI
help
Quick & dirty crypto test module.
source "drivers/crypto/Kconfig"
endmenu
endif # if CRYPTO
endmenu


@@ -2,11 +2,18 @@
# Cryptographic API
#
proc-crypto-$(CONFIG_PROC_FS) = proc.o
obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o
obj-$(CONFIG_CRYPTO) += api.o scatterwalk.o cipher.o digest.o compress.o \
$(proc-crypto-y)
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
crypto_algapi-objs := algapi.o $(crypto_algapi-y)
obj-$(CONFIG_CRYPTO_ALGAPI) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_BLKCIPHER) += blkcipher.o
crypto_hash-objs := hash.o
obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
obj-$(CONFIG_CRYPTO_MD4) += md4.o
@@ -16,9 +23,12 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
obj-$(CONFIG_CRYPTO_DES) += des.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
obj-$(CONFIG_CRYPTO_AES) += aes.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5.o


@@ -249,13 +249,14 @@ gen_tabs (void)
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
u32 *flags = &tfm->crt_flags;
u32 i, t, u, v, w;
if (key_len != 16 && key_len != 24 && key_len != 32) {
if (key_len % 8) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
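
This hunk shows the calling-convention change applied across this merge: cia_setkey loses its u32 *flags argument, and result flags are reported through tfm->crt_flags instead. A hedged sketch of the new shape (the single key size and the helper name are hypothetical):

static int example_setkey(struct crypto_tfm *tfm, const u8 *in_key,
			  unsigned int key_len)
{
	u32 *flags = &tfm->crt_flags;	/* result flags now live in the tfm */

	if (key_len != 16) {		/* hypothetical cipher with one key size */
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	/* ... expand in_key into crypto_tfm_ctx(tfm) ... */
	return 0;
}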

crypto/algapi.c (new file, 486 lines)

@@ -0,0 +1,486 @@
/*
* Cryptographic API for algorithms (i.e., low-level API).
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include "internal.h"
static LIST_HEAD(crypto_template_list);
void crypto_larval_error(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
down_read(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type, mask);
up_read(&crypto_alg_sem);
if (alg) {
if (crypto_is_larval(alg)) {
struct crypto_larval *larval = (void *)alg;
complete(&larval->completion);
}
crypto_mod_put(alg);
}
}
EXPORT_SYMBOL_GPL(crypto_larval_error);
static inline int crypto_set_driver_name(struct crypto_alg *alg)
{
static const char suffix[] = "-generic";
char *driver_name = alg->cra_driver_name;
int len;
if (*driver_name)
return 0;
len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
memcpy(driver_name + len, suffix, sizeof(suffix));
return 0;
}
static int crypto_check_alg(struct crypto_alg *alg)
{
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
return -EINVAL;
if (alg->cra_alignmask & alg->cra_blocksize)
return -EINVAL;
if (alg->cra_blocksize > PAGE_SIZE / 8)
return -EINVAL;
if (alg->cra_priority < 0)
return -EINVAL;
return crypto_set_driver_name(alg);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
{
struct crypto_instance *inst = (void *)alg;
struct crypto_template *tmpl = inst->tmpl;
tmpl->free(inst);
crypto_tmpl_put(tmpl);
}
static void crypto_remove_spawns(struct list_head *spawns,
struct list_head *list)
{
struct crypto_spawn *spawn, *n;
list_for_each_entry_safe(spawn, n, spawns, list) {
struct crypto_instance *inst = spawn->inst;
struct crypto_template *tmpl = inst->tmpl;
list_del_init(&spawn->list);
spawn->alg = NULL;
if (crypto_is_dead(&inst->alg))
continue;
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
if (!tmpl || !crypto_tmpl_get(tmpl))
continue;
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, &inst->alg);
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
if (!list_empty(&inst->alg.cra_users)) {
if (&n->list == spawns)
n = list_entry(inst->alg.cra_users.next,
typeof(*n), list);
__list_splice(&inst->alg.cra_users, spawns->prev);
}
}
}
static int __crypto_register_alg(struct crypto_alg *alg,
struct list_head *list)
{
struct crypto_alg *q;
int ret = -EAGAIN;
if (crypto_is_dead(alg))
goto out;
INIT_LIST_HEAD(&alg->cra_users);
ret = -EEXIST;
atomic_set(&alg->cra_refcnt, 1);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (q == alg)
goto out;
if (crypto_is_moribund(q))
continue;
if (crypto_is_larval(q)) {
struct crypto_larval *larval = (void *)q;
if (strcmp(alg->cra_name, q->cra_name) &&
strcmp(alg->cra_driver_name, q->cra_name))
continue;
if (larval->adult)
continue;
if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
continue;
if (!crypto_mod_get(alg))
continue;
larval->adult = alg;
complete(&larval->completion);
continue;
}
if (strcmp(alg->cra_name, q->cra_name))
continue;
if (strcmp(alg->cra_driver_name, q->cra_driver_name) &&
q->cra_priority > alg->cra_priority)
continue;
crypto_remove_spawns(&q->cra_users, list);
}
list_add(&alg->cra_list, &crypto_alg_list);
crypto_notify(CRYPTO_MSG_ALG_REGISTER, alg);
ret = 0;
out:
return ret;
}
static void crypto_remove_final(struct list_head *list)
{
struct crypto_alg *alg;
struct crypto_alg *n;
list_for_each_entry_safe(alg, n, list, cra_list) {
list_del_init(&alg->cra_list);
crypto_alg_put(alg);
}
}
int crypto_register_alg(struct crypto_alg *alg)
{
LIST_HEAD(list);
int err;
err = crypto_check_alg(alg);
if (err)
return err;
down_write(&crypto_alg_sem);
err = __crypto_register_alg(alg, &list);
up_write(&crypto_alg_sem);
crypto_remove_final(&list);
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);
static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
{
if (unlikely(list_empty(&alg->cra_list)))
return -ENOENT;
alg->cra_flags |= CRYPTO_ALG_DEAD;
crypto_notify(CRYPTO_MSG_ALG_UNREGISTER, alg);
list_del_init(&alg->cra_list);
crypto_remove_spawns(&alg->cra_users, list);
return 0;
}
int crypto_unregister_alg(struct crypto_alg *alg)
{
int ret;
LIST_HEAD(list);
down_write(&crypto_alg_sem);
ret = crypto_remove_alg(alg, &list);
up_write(&crypto_alg_sem);
if (ret)
return ret;
BUG_ON(atomic_read(&alg->cra_refcnt) != 1);
if (alg->cra_destroy)
alg->cra_destroy(alg);
crypto_remove_final(&list);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
int crypto_register_template(struct crypto_template *tmpl)
{
struct crypto_template *q;
int err = -EEXIST;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_template_list, list) {
if (q == tmpl)
goto out;
}
list_add(&tmpl->list, &crypto_template_list);
crypto_notify(CRYPTO_MSG_TMPL_REGISTER, tmpl);
err = 0;
out:
up_write(&crypto_alg_sem);
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_template);
void crypto_unregister_template(struct crypto_template *tmpl)
{
struct crypto_instance *inst;
struct hlist_node *p, *n;
struct hlist_head *list;
LIST_HEAD(users);
down_write(&crypto_alg_sem);
BUG_ON(list_empty(&tmpl->list));
list_del_init(&tmpl->list);
list = &tmpl->instances;
hlist_for_each_entry(inst, p, list, list) {
int err = crypto_remove_alg(&inst->alg, &users);
BUG_ON(err);
}
crypto_notify(CRYPTO_MSG_TMPL_UNREGISTER, tmpl);
up_write(&crypto_alg_sem);
hlist_for_each_entry_safe(inst, p, n, list, list) {
BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
tmpl->free(inst);
}
crypto_remove_final(&users);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
static struct crypto_template *__crypto_lookup_template(const char *name)
{
struct crypto_template *q, *tmpl = NULL;
down_read(&crypto_alg_sem);
list_for_each_entry(q, &crypto_template_list, list) {
if (strcmp(q->name, name))
continue;
if (unlikely(!crypto_tmpl_get(q)))
continue;
tmpl = q;
break;
}
up_read(&crypto_alg_sem);
return tmpl;
}
struct crypto_template *crypto_lookup_template(const char *name)
{
return try_then_request_module(__crypto_lookup_template(name), name);
}
EXPORT_SYMBOL_GPL(crypto_lookup_template);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst)
{
LIST_HEAD(list);
int err = -EINVAL;
if (inst->alg.cra_destroy)
goto err;
err = crypto_check_alg(&inst->alg);
if (err)
goto err;
inst->alg.cra_module = tmpl->module;
down_write(&crypto_alg_sem);
err = __crypto_register_alg(&inst->alg, &list);
if (err)
goto unlock;
hlist_add_head(&inst->list, &tmpl->instances);
inst->tmpl = tmpl;
unlock:
up_write(&crypto_alg_sem);
crypto_remove_final(&list);
err:
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst)
{
int err = -EAGAIN;
spawn->inst = inst;
down_write(&crypto_alg_sem);
if (!crypto_is_moribund(alg)) {
list_add(&spawn->list, &alg->cra_users);
spawn->alg = alg;
err = 0;
}
up_write(&crypto_alg_sem);
return err;
}
EXPORT_SYMBOL_GPL(crypto_init_spawn);
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
down_write(&crypto_alg_sem);
list_del(&spawn->list);
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;
struct crypto_tfm *tfm;
down_read(&crypto_alg_sem);
alg = spawn->alg;
alg2 = alg;
if (alg2)
alg2 = crypto_mod_get(alg2);
up_read(&crypto_alg_sem);
if (!alg2) {
if (alg)
crypto_shoot_alg(alg);
return ERR_PTR(-EAGAIN);
}
tfm = __crypto_alloc_tfm(alg, 0);
if (IS_ERR(tfm))
crypto_mod_put(alg);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
int crypto_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_register_notifier);
int crypto_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len,
u32 type, u32 mask)
{
struct rtattr *rta = param;
struct crypto_attr_alg *alga;
if (!RTA_OK(rta, len))
return ERR_PTR(-EBADR);
if (rta->rta_type != CRYPTOA_ALG || RTA_PAYLOAD(rta) < sizeof(*alga))
return ERR_PTR(-EINVAL);
alga = RTA_DATA(rta);
alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0;
return crypto_alg_mod_lookup(alga->name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_get_attr_alg);
struct crypto_instance *crypto_alloc_instance(const char *name,
struct crypto_alg *alg)
{
struct crypto_instance *inst;
struct crypto_spawn *spawn;
int err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return ERR_PTR(-ENOMEM);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
spawn = crypto_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, inst);
if (err)
goto err_free_inst;
return inst;
err_free_inst:
kfree(inst);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_instance);
static int __init crypto_algapi_init(void)
{
crypto_init_proc();
return 0;
}
static void __exit crypto_algapi_exit(void)
{
crypto_exit_proc();
}
module_init(crypto_algapi_init);
module_exit(crypto_algapi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cryptographic algorithms API");


@@ -461,10 +461,11 @@ static const u32 rc[] = {
};
static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
u32 *flags = &tfm->crt_flags;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];


@@ -15,71 +15,203 @@
*
*/
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);
static inline int crypto_alg_get(struct crypto_alg *alg)
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
{
return try_module_get(alg->cra_module);
atomic_inc(&alg->cra_refcnt);
return alg;
}
static inline void crypto_alg_put(struct crypto_alg *alg)
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);
void crypto_mod_put(struct crypto_alg *alg)
{
crypto_alg_put(alg);
module_put(alg->cra_module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);
static struct crypto_alg *crypto_alg_lookup(const char *name)
struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask)
{
struct crypto_alg *q, *alg = NULL;
int best = -1;
int best = -2;
if (!name)
return NULL;
down_read(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
int exact, fuzzy;
if (crypto_is_moribund(q))
continue;
if ((q->cra_flags ^ type) & mask)
continue;
if (crypto_is_larval(q) &&
((struct crypto_larval *)q)->mask != mask)
continue;
exact = !strcmp(q->cra_driver_name, name);
fuzzy = !strcmp(q->cra_name, name);
if (!exact && !(fuzzy && q->cra_priority > best))
continue;
if (unlikely(!crypto_alg_get(q)))
if (unlikely(!crypto_mod_get(q)))
continue;
best = q->cra_priority;
if (alg)
crypto_alg_put(alg);
crypto_mod_put(alg);
alg = q;
if (exact)
break;
}
up_read(&crypto_alg_sem);
return alg;
}
EXPORT_SYMBOL_GPL(__crypto_alg_lookup);
static void crypto_larval_destroy(struct crypto_alg *alg)
{
struct crypto_larval *larval = (void *)alg;
BUG_ON(!crypto_is_larval(alg));
if (larval->adult)
crypto_mod_put(larval->adult);
kfree(larval);
}
static struct crypto_alg *crypto_larval_alloc(const char *name, u32 type,
u32 mask)
{
struct crypto_alg *alg;
struct crypto_larval *larval;
larval = kzalloc(sizeof(*larval), GFP_KERNEL);
if (!larval)
return ERR_PTR(-ENOMEM);
larval->mask = mask;
larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
larval->alg.cra_priority = -1;
larval->alg.cra_destroy = crypto_larval_destroy;
atomic_set(&larval->alg.cra_refcnt, 2);
strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
init_completion(&larval->completion);
down_write(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type, mask);
if (!alg) {
alg = &larval->alg;
list_add(&alg->cra_list, &crypto_alg_list);
}
up_write(&crypto_alg_sem);
if (alg != &larval->alg)
kfree(larval);
return alg;
}
/* A far more intelligent version of this is planned. For now, just
* try an exact match on the name of the algorithm. */
static inline struct crypto_alg *crypto_alg_mod_lookup(const char *name)
static void crypto_larval_kill(struct crypto_alg *alg)
{
return try_then_request_module(crypto_alg_lookup(name), name);
struct crypto_larval *larval = (void *)alg;
down_write(&crypto_alg_sem);
list_del(&alg->cra_list);
up_write(&crypto_alg_sem);
complete(&larval->completion);
crypto_alg_put(alg);
}
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
struct crypto_larval *larval = (void *)alg;
wait_for_completion_interruptible_timeout(&larval->completion, 60 * HZ);
alg = larval->adult;
if (alg) {
if (!crypto_mod_get(alg))
alg = ERR_PTR(-EAGAIN);
} else
alg = ERR_PTR(-ENOENT);
crypto_mod_put(&larval->alg);
return alg;
}
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
u32 mask)
{
struct crypto_alg *alg;
down_read(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type, mask);
up_read(&crypto_alg_sem);
return alg;
}
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
struct crypto_alg *larval;
int ok;
if (!name)
return ERR_PTR(-ENOENT);
mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
type &= mask;
alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
name);
if (alg)
return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
larval = crypto_larval_alloc(name, type, mask);
if (IS_ERR(larval) || !crypto_is_larval(larval))
return larval;
ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_DONE) {
request_module("cryptomgr");
ok = crypto_notify(CRYPTO_MSG_ALG_REQUEST, larval);
}
if (ok == NOTIFY_STOP)
alg = crypto_larval_wait(larval);
else {
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
}
crypto_larval_kill(larval);
return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
{
tfm->crt_flags = flags & CRYPTO_TFM_REQ_MASK;
@@ -94,17 +226,18 @@ static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
case CRYPTO_ALG_TYPE_COMPRESS:
return crypto_init_compress_flags(tfm, flags);
default:
break;
}
BUG();
return -EINVAL;
return 0;
}
static int crypto_init_ops(struct crypto_tfm *tfm)
{
const struct crypto_type *type = tfm->__crt_alg->cra_type;
if (type)
return type->init(tfm);
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
return crypto_init_cipher_ops(tfm);
@@ -125,6 +258,14 @@ static int crypto_init_ops(struct crypto_tfm *tfm)
static void crypto_exit_ops(struct crypto_tfm *tfm)
{
const struct crypto_type *type = tfm->__crt_alg->cra_type;
if (type) {
if (type->exit)
type->exit(tfm);
return;
}
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
crypto_exit_cipher_ops(tfm);
@@ -146,53 +287,67 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags)
{
const struct crypto_type *type = alg->cra_type;
unsigned int len;
len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
if (type)
return len + type->ctxsize(alg);
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
default:
BUG();
case CRYPTO_ALG_TYPE_CIPHER:
len = crypto_cipher_ctxsize(alg, flags);
len += crypto_cipher_ctxsize(alg, flags);
break;
case CRYPTO_ALG_TYPE_DIGEST:
len = crypto_digest_ctxsize(alg, flags);
len += crypto_digest_ctxsize(alg, flags);
break;
case CRYPTO_ALG_TYPE_COMPRESS:
len = crypto_compress_ctxsize(alg, flags);
len += crypto_compress_ctxsize(alg, flags);
break;
}
return len + (alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1));
return len;
}
struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
void crypto_shoot_alg(struct crypto_alg *alg)
{
down_write(&crypto_alg_sem);
alg->cra_flags |= CRYPTO_ALG_DYING;
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags)
{
struct crypto_tfm *tfm = NULL;
struct crypto_alg *alg;
unsigned int tfm_size;
alg = crypto_alg_mod_lookup(name);
if (alg == NULL)
goto out;
int err = -ENOMEM;
tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags);
tfm = kzalloc(tfm_size, GFP_KERNEL);
if (tfm == NULL)
goto out_put;
goto out;
tfm->__crt_alg = alg;
if (crypto_init_flags(tfm, flags))
err = crypto_init_flags(tfm, flags);
if (err)
goto out_free_tfm;
if (crypto_init_ops(tfm))
err = crypto_init_ops(tfm);
if (err)
goto out_free_tfm;
if (alg->cra_init && alg->cra_init(tfm))
if (alg->cra_init && (err = alg->cra_init(tfm))) {
if (err == -EAGAIN)
crypto_shoot_alg(alg);
goto cra_init_failed;
}
goto out;
@@ -200,13 +355,97 @@ cra_init_failed:
crypto_exit_ops(tfm);
out_free_tfm:
kfree(tfm);
tfm = NULL;
out_put:
crypto_alg_put(alg);
tfm = ERR_PTR(err);
out:
return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
{
struct crypto_tfm *tfm = NULL;
int err;
do {
struct crypto_alg *alg;
alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
err = PTR_ERR(alg);
if (IS_ERR(alg))
continue;
tfm = __crypto_alloc_tfm(alg, flags);
err = 0;
if (IS_ERR(tfm)) {
crypto_mod_put(alg);
err = PTR_ERR(tfm);
tfm = NULL;
}
} while (err == -EAGAIN && !signal_pending(current));
return tfm;
}
/*
* crypto_alloc_base - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
* @type: Type of algorithm
* @mask: Mask for type comparison
*
* crypto_alloc_base() will first attempt to locate an already loaded
* algorithm. If that fails and the kernel supports dynamically loadable
* modules, it will then attempt to load a module of the same name or
* alias. If that fails it will send a query to any loaded crypto manager
* to construct an algorithm on the fly. A refcount is grabbed on the
* algorithm which is then associated with the new transform.
*
* The returned transform is of a non-determinate type. Most people
* should use one of the more specific allocation functions such as
* crypto_alloc_blkcipher.
*
* In case of error the return value is an error pointer.
*/
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
struct crypto_tfm *tfm;
int err;
for (;;) {
struct crypto_alg *alg;
alg = crypto_alg_mod_lookup(alg_name, type, mask);
err = PTR_ERR(alg);
tfm = ERR_PTR(err);
if (IS_ERR(alg))
goto err;
tfm = __crypto_alloc_tfm(alg, 0);
if (!IS_ERR(tfm))
break;
crypto_mod_put(alg);
err = PTR_ERR(tfm);
err:
if (err != -EAGAIN)
break;
if (signal_pending(current)) {
err = -EINTR;
break;
}
};
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
/*
* crypto_free_tfm - Free crypto transform
* @tfm: Transform to free
*
* crypto_free_tfm() frees up the transform and any associated resources,
* then drops the refcount on the associated algorithm.
*/
void crypto_free_tfm(struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
@@ -221,108 +460,39 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
if (alg->cra_exit)
alg->cra_exit(tfm);
crypto_exit_ops(tfm);
crypto_alg_put(alg);
crypto_mod_put(alg);
memset(tfm, 0, size);
kfree(tfm);
}
static inline int crypto_set_driver_name(struct crypto_alg *alg)
{
static const char suffix[] = "-generic";
char *driver_name = alg->cra_driver_name;
int len;
if (*driver_name)
return 0;
len = strlcpy(driver_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
if (len + sizeof(suffix) > CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
memcpy(driver_name + len, suffix, sizeof(suffix));
return 0;
}
int crypto_register_alg(struct crypto_alg *alg)
{
int ret;
struct crypto_alg *q;
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
return -EINVAL;
if (alg->cra_alignmask & alg->cra_blocksize)
return -EINVAL;
if (alg->cra_blocksize > PAGE_SIZE / 8)
return -EINVAL;
if (alg->cra_priority < 0)
return -EINVAL;
ret = crypto_set_driver_name(alg);
if (unlikely(ret))
return ret;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (q == alg) {
ret = -EEXIST;
goto out;
}
}
list_add(&alg->cra_list, &crypto_alg_list);
out:
up_write(&crypto_alg_sem);
return ret;
}
int crypto_unregister_alg(struct crypto_alg *alg)
{
int ret = -ENOENT;
struct crypto_alg *q;
BUG_ON(!alg->cra_module);
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (alg == q) {
list_del(&alg->cra_list);
ret = 0;
goto out;
}
}
out:
up_write(&crypto_alg_sem);
return ret;
}
int crypto_alg_available(const char *name, u32 flags)
{
int ret = 0;
struct crypto_alg *alg = crypto_alg_mod_lookup(name);
struct crypto_alg *alg = crypto_alg_mod_lookup(name, 0,
CRYPTO_ALG_ASYNC);
if (alg) {
crypto_alg_put(alg);
if (!IS_ERR(alg)) {
crypto_mod_put(alg);
ret = 1;
}
return ret;
}
static int __init init_crypto(void)
{
printk(KERN_INFO "Initializing Cryptographic API\n");
crypto_init_proc();
return 0;
}
__initcall(init_crypto);
EXPORT_SYMBOL_GPL(crypto_register_alg);
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
EXPORT_SYMBOL_GPL(crypto_free_tfm);
EXPORT_SYMBOL_GPL(crypto_alg_available);
int crypto_has_alg(const char *name, u32 type, u32 mask)
{
int ret = 0;
struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);
if (!IS_ERR(alg)) {
crypto_mod_put(alg);
ret = 1;
}
return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
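
A hedged caller-side sketch of the two lookup entry points exported above; "md5" is just an example name:

static struct crypto_tfm *grab_md5(void)
{
	/* Probe without keeping a reference on the algorithm... */
	if (!crypto_has_alg("md5", 0, CRYPTO_ALG_ASYNC))
		return ERR_PTR(-ENOENT);

	/* ...then allocate a bare transform.  Type 0 with the ASYNC bit
	 * in the mask asks for a synchronous implementation, matching
	 * the legacy crypto_alloc_tfm() wrapper above. */
	return crypto_alloc_base("md5", 0, CRYPTO_ALG_ASYNC);
}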


@@ -25,7 +25,7 @@ struct arc4_ctx {
};
static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
int i, j = 0, k = 0;

crypto/blkcipher.c (new file, 405 lines)

@@ -0,0 +1,405 @@
/*
* Block chaining cipher operations.
*
* Generic encrypt/decrypt wrapper for ciphers, handles operations across
* multiple page boundaries by using temporary blocks. In user context,
* the kernel is given a chance to schedule us once per page.
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "internal.h"
#include "scatterwalk.h"
enum {
BLKCIPHER_WALK_PHYS = 1 << 0,
BLKCIPHER_WALK_SLOW = 1 << 1,
BLKCIPHER_WALK_COPY = 1 << 2,
BLKCIPHER_WALK_DIFF = 1 << 3,
};
static int blkcipher_walk_next(struct blkcipher_desc *desc,
struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
struct blkcipher_walk *walk);
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}
static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}
static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
scatterwalk_unmap(walk->src.virt.addr, 0);
}
static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
scatterwalk_unmap(walk->dst.virt.addr, 1);
}
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
if (offset_in_page(start + len) < len)
return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
return start;
}
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
struct blkcipher_walk *walk,
unsigned int bsize)
{
u8 *addr;
unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
addr = blkcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
return bsize;
}
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
unsigned int n)
{
n = walk->nbytes - n;
if (walk->flags & BLKCIPHER_WALK_COPY) {
blkcipher_map_dst(walk);
memcpy(walk->dst.virt.addr, walk->page, n);
blkcipher_unmap_dst(walk);
} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
blkcipher_unmap_src(walk);
if (walk->flags & BLKCIPHER_WALK_DIFF)
blkcipher_unmap_dst(walk);
}
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
return n;
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err)
{
struct crypto_blkcipher *tfm = desc->tfm;
unsigned int nbytes = 0;
if (likely(err >= 0)) {
unsigned int bsize = crypto_blkcipher_blocksize(tfm);
unsigned int n;
if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
n = blkcipher_done_fast(walk, err);
else
n = blkcipher_done_slow(tfm, walk, bsize);
nbytes = walk->total - n;
err = 0;
}
scatterwalk_done(&walk->in, 0, nbytes);
scatterwalk_done(&walk->out, 1, nbytes);
walk->total = nbytes;
walk->nbytes = nbytes;
if (nbytes) {
crypto_yield(desc->flags);
return blkcipher_walk_next(desc, walk);
}
if (walk->iv != desc->info)
memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
unsigned int bsize,
unsigned int alignmask)
{
unsigned int n;
if (walk->buffer)
goto ok;
walk->buffer = walk->page;
if (walk->buffer)
goto ok;
n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
walk->buffer = kmalloc(n, GFP_ATOMIC);
if (!walk->buffer)
return blkcipher_walk_done(desc, walk, -ENOMEM);
ok:
walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
alignmask + 1);
walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
bsize);
scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
walk->nbytes = bsize;
walk->flags |= BLKCIPHER_WALK_SLOW;
return 0;
}
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
u8 *tmp = walk->page;
blkcipher_map_src(walk);
memcpy(tmp, walk->src.virt.addr, walk->nbytes);
blkcipher_unmap_src(walk);
walk->src.virt.addr = tmp;
walk->dst.virt.addr = tmp;
return 0;
}
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
unsigned long diff;
walk->src.phys.page = scatterwalk_page(&walk->in);
walk->src.phys.offset = offset_in_page(walk->in.offset);
walk->dst.phys.page = scatterwalk_page(&walk->out);
walk->dst.phys.offset = offset_in_page(walk->out.offset);
if (walk->flags & BLKCIPHER_WALK_PHYS)
return 0;
diff = walk->src.phys.offset - walk->dst.phys.offset;
diff |= walk->src.virt.page - walk->dst.virt.page;
blkcipher_map_src(walk);
walk->dst.virt.addr = walk->src.virt.addr;
if (diff) {
walk->flags |= BLKCIPHER_WALK_DIFF;
blkcipher_map_dst(walk);
}
return 0;
}
static int blkcipher_walk_next(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct crypto_blkcipher *tfm = desc->tfm;
unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
unsigned int bsize = crypto_blkcipher_blocksize(tfm);
unsigned int n;
int err;
n = walk->total;
if (unlikely(n < bsize)) {
desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return blkcipher_walk_done(desc, walk, -EINVAL);
}
walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
BLKCIPHER_WALK_DIFF);
if (!scatterwalk_aligned(&walk->in, alignmask) ||
!scatterwalk_aligned(&walk->out, alignmask)) {
walk->flags |= BLKCIPHER_WALK_COPY;
if (!walk->page) {
walk->page = (void *)__get_free_page(GFP_ATOMIC);
if (!walk->page)
n = 0;
}
}
n = scatterwalk_clamp(&walk->in, n);
n = scatterwalk_clamp(&walk->out, n);
if (unlikely(n < bsize)) {
err = blkcipher_next_slow(desc, walk, bsize, alignmask);
goto set_phys_lowmem;
}
walk->nbytes = n;
if (walk->flags & BLKCIPHER_WALK_COPY) {
err = blkcipher_next_copy(walk);
goto set_phys_lowmem;
}
return blkcipher_next_fast(desc, walk);
set_phys_lowmem:
if (walk->flags & BLKCIPHER_WALK_PHYS) {
walk->src.phys.page = virt_to_page(walk->src.virt.addr);
walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
walk->src.phys.offset &= PAGE_SIZE - 1;
walk->dst.phys.offset &= PAGE_SIZE - 1;
}
return err;
}
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
struct crypto_blkcipher *tfm,
unsigned int alignmask)
{
unsigned bs = crypto_blkcipher_blocksize(tfm);
unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
u8 *iv;
size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
walk->buffer = kmalloc(size, GFP_ATOMIC);
if (!walk->buffer)
return -ENOMEM;
iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
iv = blkcipher_get_spot(iv, bs) + bs;
iv = blkcipher_get_spot(iv, bs) + bs;
iv = blkcipher_get_spot(iv, ivsize);
walk->iv = memcpy(iv, walk->iv, ivsize);
return 0;
}
int blkcipher_walk_virt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
walk->flags &= ~BLKCIPHER_WALK_PHYS;
return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
walk->flags |= BLKCIPHER_WALK_PHYS;
return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
struct blkcipher_walk *walk)
{
struct crypto_blkcipher *tfm = desc->tfm;
unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
walk->buffer = NULL;
walk->iv = desc->info;
if (unlikely(((unsigned long)walk->iv & alignmask))) {
int err = blkcipher_copy_iv(walk, tfm, alignmask);
if (err)
return err;
}
scatterwalk_start(&walk->in, walk->in.sg);
scatterwalk_start(&walk->out, walk->out.sg);
walk->page = NULL;
return blkcipher_walk_next(desc, walk);
}
static int setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
return cipher->setkey(tfm, key, keylen);
}
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
{
struct blkcipher_alg *cipher = &alg->cra_blkcipher;
unsigned int len = alg->cra_ctxsize;
if (cipher->ivsize) {
len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
len += cipher->ivsize;
}
return len;
}
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
{
struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
unsigned long addr;
if (alg->ivsize > PAGE_SIZE / 8)
return -EINVAL;
crt->setkey = setkey;
crt->encrypt = alg->encrypt;
crt->decrypt = alg->decrypt;
addr = (unsigned long)crypto_tfm_ctx(tfm);
addr = ALIGN(addr, align);
addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
crt->iv = (void *)addr;
return 0;
}
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
__attribute_used__;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : blkcipher\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
}
const struct crypto_type crypto_blkcipher_type = {
.ctxsize = crypto_blkcipher_ctxsize,
.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");


@@ -399,8 +399,7 @@ static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
/*
* Calculates the blowfish S and P boxes for encryption and decryption.
*/
static int bf_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
static int bf_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *P = ctx->p;


@@ -769,8 +769,7 @@ static void key_schedule(u32 * x, u32 * z, u32 * k)
}
static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned key_len, u32 *flags)
static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned key_len)
{
struct cast5_ctx *c = crypto_tfm_ctx(tfm);
int i;
@@ -778,11 +777,6 @@ static int cast5_setkey(struct crypto_tfm *tfm, const u8 *key,
u32 z[4];
u32 k[16];
__be32 p_key[4];
if (key_len < 5 || key_len > 16) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
c->rr = key_len <= 10 ? 1 : 0;


@@ -382,14 +382,15 @@ static inline void W(u32 *key, unsigned int i) {
}
static int cast6_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned key_len, u32 *flags)
unsigned key_len)
{
int i;
u32 key[8];
__be32 p_key[8]; /* padded key */
struct cast6_ctx *c = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
if (key_len < 16 || key_len > 32 || key_len % 4 != 0) {
if (key_len % 4 != 0) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}

crypto/cbc.c (new file, 344 lines)

@@ -0,0 +1,344 @@
/*
* CBC: Cipher Block Chaining mode
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_cbc_ctx {
struct crypto_cipher *child;
void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
};
static int crypto_cbc_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_cbc_encrypt_segment(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*xor)(u8 *, const u8 *,
unsigned int))
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
xor(iv, src, bsize);
fn(crypto_cipher_tfm(tfm), dst, iv);
memcpy(iv, dst, bsize);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_cbc_encrypt_inplace(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*xor)(u8 *, const u8 *,
unsigned int))
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_encrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *iv = walk->iv;
do {
xor(src, iv, bsize);
fn(crypto_cipher_tfm(tfm), src, src);
iv = src;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_cbc_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_cbc_encrypt_inplace(desc, &walk, child,
xor);
else
nbytes = crypto_cbc_encrypt_segment(desc, &walk, child,
xor);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int crypto_cbc_decrypt_segment(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*xor)(u8 *, const u8 *,
unsigned int))
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 *iv = walk->iv;
do {
fn(crypto_cipher_tfm(tfm), dst, src);
xor(dst, iv, bsize);
iv = src;
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_cbc_decrypt_inplace(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*xor)(u8 *, const u8 *,
unsigned int))
{
void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
crypto_cipher_alg(tfm)->cia_decrypt;
int bsize = crypto_cipher_blocksize(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 stack[bsize + alignmask];
u8 *first_iv = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
memcpy(first_iv, walk->iv, bsize);
/* Start of the last block. */
src += nbytes - nbytes % bsize - bsize;
memcpy(walk->iv, src, bsize);
for (;;) {
fn(crypto_cipher_tfm(tfm), src, src);
if ((nbytes -= bsize) < bsize)
break;
xor(src, src - bsize, bsize);
src -= bsize;
}
xor(src, first_iv, bsize);
return nbytes;
}
static int crypto_cbc_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_cbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_cbc_decrypt_inplace(desc, &walk, child,
xor);
else
nbytes = crypto_cbc_decrypt_segment(desc, &walk, child,
xor);
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
{
do {
*a++ ^= *b++;
} while (--bs);
}
static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
{
u32 *a = (u32 *)dst;
u32 *b = (u32 *)src;
do {
*a++ ^= *b++;
} while ((bs -= 4));
}
static void xor_64(u8 *a, const u8 *b, unsigned int bs)
{
((u32 *)a)[0] ^= ((u32 *)b)[0];
((u32 *)a)[1] ^= ((u32 *)b)[1];
}
static void xor_128(u8 *a, const u8 *b, unsigned int bs)
{
((u32 *)a)[0] ^= ((u32 *)b)[0];
((u32 *)a)[1] ^= ((u32 *)b)[1];
((u32 *)a)[2] ^= ((u32 *)b)[2];
((u32 *)a)[3] ^= ((u32 *)b)[3];
}
static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
switch (crypto_tfm_alg_blocksize(tfm)) {
case 8:
ctx->xor = xor_64;
break;
case 16:
ctx->xor = xor_128;
break;
default:
if (crypto_tfm_alg_blocksize(tfm) % 4)
ctx->xor = xor_byte;
else
ctx->xor = xor_quad;
}
tfm = crypto_spawn_tfm(spawn);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
ctx->child = crypto_cipher_cast(tfm);
return 0;
}
static void crypto_cbc_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_cbc_alloc(void *param, unsigned int len)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
inst = crypto_alloc_instance("cbc", alg);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_blkcipher_type;
if (!(alg->cra_blocksize % 4))
inst->alg.cra_alignmask |= 3;
inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.cra_ctxsize = sizeof(struct crypto_cbc_ctx);
inst->alg.cra_init = crypto_cbc_init_tfm;
inst->alg.cra_exit = crypto_cbc_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_cbc_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_cbc_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_cbc_decrypt;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static void crypto_cbc_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_cbc_tmpl = {
.name = "cbc",
.alloc = crypto_cbc_alloc,
.free = crypto_cbc_free,
.module = THIS_MODULE,
};
static int __init crypto_cbc_module_init(void)
{
return crypto_register_template(&crypto_cbc_tmpl);
}
static void __exit crypto_cbc_module_exit(void)
{
crypto_unregister_template(&crypto_cbc_tmpl);
}
module_init(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher algorithm");


@@ -23,6 +23,28 @@
#include "internal.h"
#include "scatterwalk.h"
struct cipher_alg_compat {
unsigned int cia_min_keysize;
unsigned int cia_max_keysize;
int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
unsigned int nbytes);
unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
unsigned int nbytes);
unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
unsigned int nbytes);
unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
unsigned int nbytes);
};
static inline void xor_64(u8 *a, const u8 *b)
{
((u32 *)a)[0] ^= ((u32 *)b)[0];
@@ -45,15 +67,10 @@ static unsigned int crypt_slow(const struct cipher_desc *desc,
u8 buffer[bsize * 2 + alignmask];
u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
u8 *dst = src + bsize;
unsigned int n;
n = scatterwalk_copychunks(src, in, bsize, 0);
scatterwalk_advance(in, n);
scatterwalk_copychunks(src, in, bsize, 0);
desc->prfn(desc, dst, src, bsize);
n = scatterwalk_copychunks(dst, out, bsize, 1);
scatterwalk_advance(out, n);
scatterwalk_copychunks(dst, out, bsize, 1);
return bsize;
}
@@ -64,12 +81,16 @@ static inline unsigned int crypt_fast(const struct cipher_desc *desc,
unsigned int nbytes, u8 *tmp)
{
u8 *src, *dst;
u8 *real_src, *real_dst;
src = in->data;
dst = scatterwalk_samebuf(in, out) ? src : out->data;
real_src = scatterwalk_map(in, 0);
real_dst = scatterwalk_map(out, 1);
src = real_src;
dst = scatterwalk_samebuf(in, out) ? src : real_dst;
if (tmp) {
memcpy(tmp, in->data, nbytes);
memcpy(tmp, src, nbytes);
src = tmp;
dst = tmp;
}
@@ -77,7 +98,10 @@ static inline unsigned int crypt_fast(const struct cipher_desc *desc,
nbytes = desc->prfn(desc, dst, src, nbytes);
if (tmp)
memcpy(out->data, tmp, nbytes);
memcpy(real_dst, tmp, nbytes);
scatterwalk_unmap(real_src, 0);
scatterwalk_unmap(real_dst, 1);
scatterwalk_advance(in, nbytes);
scatterwalk_advance(out, nbytes);
@@ -126,9 +150,6 @@ static int crypt(const struct cipher_desc *desc,
tmp = (u8 *)buffer;
}
scatterwalk_map(&walk_in, 0);
scatterwalk_map(&walk_out, 1);
n = scatterwalk_clamp(&walk_in, n);
n = scatterwalk_clamp(&walk_out, n);
@@ -145,7 +166,7 @@ static int crypt(const struct cipher_desc *desc,
if (!nbytes)
break;
crypto_yield(tfm);
crypto_yield(tfm->crt_flags);
}
if (buffer)
@@ -264,12 +285,12 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
} else
return cia->cia_setkey(tfm, key, keylen,
&tfm->crt_flags);
return cia->cia_setkey(tfm, key, keylen);
}
static int ecb_encrypt(struct crypto_tfm *tfm,
@@ -277,7 +298,7 @@ static int ecb_encrypt(
struct scatterlist *src, unsigned int nbytes)
{
struct cipher_desc desc;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
desc.crfn = cipher->cia_encrypt;
@@ -292,7 +313,7 @@ static int ecb_decrypt(struct crypto_tfm *tfm,
unsigned int nbytes)
{
struct cipher_desc desc;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
desc.crfn = cipher->cia_decrypt;
@@ -307,7 +328,7 @@ static int cbc_encrypt(struct crypto_tfm *tfm,
unsigned int nbytes)
{
struct cipher_desc desc;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
desc.crfn = cipher->cia_encrypt;
@ -323,7 +344,7 @@ static int cbc_encrypt_iv(struct crypto_tfm *tfm,
unsigned int nbytes, u8 *iv)
{
struct cipher_desc desc;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
desc.crfn = cipher->cia_encrypt;
@ -339,7 +360,7 @@ static int cbc_decrypt(struct crypto_tfm *tfm,
unsigned int nbytes)
{
struct cipher_desc desc;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
desc.crfn = cipher->cia_decrypt;
@ -355,7 +376,7 @@ static int cbc_decrypt_iv(struct crypto_tfm *tfm,
unsigned int nbytes, u8 *iv)
{
struct cipher_desc desc;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
desc.tfm = tfm;
desc.crfn = cipher->cia_decrypt;
@ -388,17 +409,67 @@ int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
return 0;
}
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
const u8 *),
struct crypto_tfm *tfm,
u8 *dst, const u8 *src)
{
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
unsigned int size = crypto_tfm_alg_blocksize(tfm);
u8 buffer[size + alignmask];
u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(tmp, src, size);
fn(tfm, tmp, tmp);
memcpy(dst, tmp, size);
}
static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
u8 *dst, const u8 *src)
{
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
return;
}
cipher->cia_encrypt(tfm, dst, src);
}
static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
u8 *dst, const u8 *src)
{
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src);
return;
}
cipher->cia_decrypt(tfm, dst, src);
}
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
int ret = 0;
struct cipher_tfm *ops = &tfm->crt_cipher;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
ops->cit_setkey = setkey;
ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
cipher_encrypt_unaligned : cipher->cia_encrypt;
ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
cipher_decrypt_unaligned : cipher->cia_decrypt;
switch (tfm->crt_cipher.cit_mode) {
case CRYPTO_TFM_MODE_ECB:
ops->cit_encrypt = ecb_encrypt;
ops->cit_decrypt = ecb_decrypt;
ops->cit_encrypt_iv = nocrypt_iv;
ops->cit_decrypt_iv = nocrypt_iv;
break;
case CRYPTO_TFM_MODE_CBC:

crypto/crc32c.c

@ -16,14 +16,14 @@
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/crc32c.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#define CHKSUM_BLOCK_SIZE 32
#define CHKSUM_DIGEST_SIZE 4
struct chksum_ctx {
u32 crc;
u32 key;
};
/*
@ -35,7 +35,7 @@ static void chksum_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->crc = ~(u32)0; /* common usage */
mctx->crc = mctx->key;
}
/*
@ -44,16 +44,15 @@ static void chksum_init(struct crypto_tfm *tfm)
* the seed.
*/
static int chksum_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
if (keylen != sizeof(mctx->crc)) {
if (flags)
*flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
mctx->crc = __cpu_to_le32(*(u32 *)key);
mctx->key = le32_to_cpu(*(__le32 *)key);
return 0;
}
@ -61,19 +60,23 @@ static void chksum_update(struct crypto_tfm *tfm, const u8 *data,
unsigned int length)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
u32 mcrc;
mcrc = crc32c(mctx->crc, data, (size_t)length);
mctx->crc = mcrc;
mctx->crc = crc32c(mctx->crc, data, length);
}
static void chksum_final(struct crypto_tfm *tfm, u8 *out)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
u32 mcrc = (mctx->crc ^ ~(u32)0);
*(u32 *)out = __le32_to_cpu(mcrc);
*(__le32 *)out = ~cpu_to_le32(mctx->crc);
}
static int crc32c_cra_init(struct crypto_tfm *tfm)
{
struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
mctx->key = ~0;
return 0;
}
static struct crypto_alg alg = {
@ -83,6 +86,7 @@ static struct crypto_alg alg = {
.cra_ctxsize = sizeof(struct chksum_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(alg.cra_list),
.cra_init = crc32c_cra_init,
.cra_u = {
.digest = {
.dia_digestsize= CHKSUM_DIGEST_SIZE,
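The diff above gives crc32c a configurable seed: setkey() stores the little-endian key, init() loads it, and final() complements the running value before storing it little-endian. For reference, a stand-alone user-space sketch of the same seeded-CRC32C flow (bitwise Castagnoli polynomial; the kernel's crc32c() is table-driven, this is only illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected form of the Castagnoli polynomial 0x1EDC6F41. */
#define CRC32C_POLY 0x82F63B78u

/* Equivalent of chksum_update(): fold bytes into the running CRC. */
static uint32_t crc32c_update(uint32_t crc, const uint8_t *p, size_t len)
{
        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? CRC32C_POLY : 0);
        }
        return crc;
}

int main(void)
{
        const char *msg = "123456789";
        uint32_t crc = ~(uint32_t)0;    /* chksum_init() with the default seed */

        crc = crc32c_update(crc, (const uint8_t *)msg, strlen(msg));
        /* chksum_final(): complement before emitting; expect e3069283. */
        printf("%08x\n", (unsigned)~crc);
        return 0;
}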

crypto/crypto_null.c

@ -48,7 +48,7 @@ static void null_final(struct crypto_tfm *tfm, u8 *out)
{ }
static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{ return 0; }
static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)

crypto/cryptomgr.c Normal file

@ -0,0 +1,156 @@
/*
* Create default crypto algorithm instances.
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/crypto.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include "internal.h"
struct cryptomgr_param {
struct work_struct work;
struct {
struct rtattr attr;
struct crypto_attr_alg data;
} alg;
struct {
u32 type;
u32 mask;
char name[CRYPTO_MAX_ALG_NAME];
} larval;
char template[CRYPTO_MAX_ALG_NAME];
};
static void cryptomgr_probe(void *data)
{
struct cryptomgr_param *param = data;
struct crypto_template *tmpl;
struct crypto_instance *inst;
int err;
tmpl = crypto_lookup_template(param->template);
if (!tmpl)
goto err;
do {
inst = tmpl->alloc(&param->alg, sizeof(param->alg));
if (IS_ERR(inst))
err = PTR_ERR(inst);
else if ((err = crypto_register_instance(tmpl, inst)))
tmpl->free(inst);
} while (err == -EAGAIN && !signal_pending(current));
crypto_tmpl_put(tmpl);
if (err)
goto err;
out:
kfree(param);
return;
err:
crypto_larval_error(param->larval.name, param->larval.type,
param->larval.mask);
goto out;
}
static int cryptomgr_schedule_probe(struct crypto_larval *larval)
{
struct cryptomgr_param *param;
const char *name = larval->alg.cra_name;
const char *p;
unsigned int len;
param = kmalloc(sizeof(*param), GFP_KERNEL);
if (!param)
goto err;
for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
len = p - name;
if (!len || *p != '(')
goto err_free_param;
memcpy(param->template, name, len);
param->template[len] = 0;
name = p + 1;
for (p = name; isalnum(*p) || *p == '-' || *p == '_'; p++)
;
len = p - name;
if (!len || *p != ')' || p[1])
goto err_free_param;
param->alg.attr.rta_len = sizeof(param->alg);
param->alg.attr.rta_type = CRYPTOA_ALG;
memcpy(param->alg.data.name, name, len);
param->alg.data.name[len] = 0;
memcpy(param->larval.name, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
param->larval.type = larval->alg.cra_flags;
param->larval.mask = larval->mask;
INIT_WORK(&param->work, cryptomgr_probe, param);
schedule_work(&param->work);
return NOTIFY_STOP;
err_free_param:
kfree(param);
err:
return NOTIFY_OK;
}
static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
void *data)
{
switch (msg) {
case CRYPTO_MSG_ALG_REQUEST:
return cryptomgr_schedule_probe(data);
}
return NOTIFY_DONE;
}
static struct notifier_block cryptomgr_notifier = {
.notifier_call = cryptomgr_notify,
};
static int __init cryptomgr_init(void)
{
return crypto_register_notifier(&cryptomgr_notifier);
}
static void __exit cryptomgr_exit(void)
{
int err = crypto_unregister_notifier(&cryptomgr_notifier);
BUG_ON(err);
}
module_init(cryptomgr_init);
module_exit(cryptomgr_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto Algorithm Manager");

crypto/des.c

@ -784,9 +784,10 @@ static void dkey(u32 *pe, const u8 *k)
}
static int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
struct des_ctx *dctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
u32 tmp[DES_EXPKEY_WORDS];
int ret;
@ -864,11 +865,12 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
*
*/
static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
const u32 *K = (const u32 *)key;
struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
u32 *expkey = dctx->expkey;
u32 *flags = &tfm->crt_flags;
if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
!((K[2] ^ K[4]) | (K[3] ^ K[5]))))

crypto/digest.c

@ -11,29 +11,89 @@
* any later version.
*
*/
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include <linux/module.h>
#include <linux/scatterlist.h>
static void init(struct crypto_tfm *tfm)
#include "internal.h"
#include "scatterwalk.h"
void crypto_digest_init(struct crypto_tfm *tfm)
{
struct crypto_hash *hash = crypto_hash_cast(tfm);
struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
crypto_hash_init(&desc);
}
EXPORT_SYMBOL_GPL(crypto_digest_init);
void crypto_digest_update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
{
struct crypto_hash *hash = crypto_hash_cast(tfm);
struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
unsigned int nbytes = 0;
unsigned int i;
for (i = 0; i < nsg; i++)
nbytes += sg[i].length;
crypto_hash_update(&desc, sg, nbytes);
}
EXPORT_SYMBOL_GPL(crypto_digest_update);
void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
{
struct crypto_hash *hash = crypto_hash_cast(tfm);
struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
crypto_hash_final(&desc, out);
}
EXPORT_SYMBOL_GPL(crypto_digest_final);
void crypto_digest_digest(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg, u8 *out)
{
struct crypto_hash *hash = crypto_hash_cast(tfm);
struct hash_desc desc = { .tfm = hash, .flags = tfm->crt_flags };
unsigned int nbytes = 0;
unsigned int i;
for (i = 0; i < nsg; i++)
nbytes += sg[i].length;
crypto_hash_digest(&desc, sg, nbytes, out);
}
EXPORT_SYMBOL_GPL(crypto_digest_digest);
static int init(struct hash_desc *desc)
{
struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
tfm->__crt_alg->cra_digest.dia_init(tfm);
return 0;
}
static void update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
static int update(struct hash_desc *desc,
struct scatterlist *sg, unsigned int nbytes)
{
unsigned int i;
struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
for (i = 0; i < nsg; i++) {
if (!nbytes)
return 0;
struct page *pg = sg[i].page;
unsigned int offset = sg[i].offset;
unsigned int l = sg[i].length;
for (;;) {
struct page *pg = sg->page;
unsigned int offset = sg->offset;
unsigned int l = sg->length;
if (unlikely(l > nbytes))
l = nbytes;
nbytes -= l;
do {
unsigned int bytes_from_page = min(l, ((unsigned int)
@ -55,41 +115,60 @@ static void update(struct crypto_tfm *tfm,
tfm->__crt_alg->cra_digest.dia_update(tfm, p,
bytes_from_page);
crypto_kunmap(src, 0);
crypto_yield(tfm);
crypto_yield(desc->flags);
offset = 0;
pg++;
l -= bytes_from_page;
} while (l > 0);
if (!nbytes)
break;
sg = sg_next(sg);
}
return 0;
}
static void final(struct crypto_tfm *tfm, u8 *out)
static int final(struct hash_desc *desc, u8 *out)
{
struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
struct digest_alg *digest = &tfm->__crt_alg->cra_digest;
if (unlikely((unsigned long)out & alignmask)) {
unsigned int size = crypto_tfm_alg_digestsize(tfm);
u8 buffer[size + alignmask];
u8 *dst = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
tfm->__crt_alg->cra_digest.dia_final(tfm, dst);
memcpy(out, dst, size);
unsigned long align = alignmask + 1;
unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
u8 *dst = (u8 *)ALIGN(addr, align) +
ALIGN(tfm->__crt_alg->cra_ctxsize, align);
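/* dst points into the spare digest-sized area that
 * crypto_digest_ctxsize() (internal.h) now reserves past the aligned
 * context, replacing the old on-stack bounce buffer. */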
digest->dia_final(tfm, dst);
memcpy(out, dst, digest->dia_digestsize);
} else
tfm->__crt_alg->cra_digest.dia_final(tfm, out);
digest->dia_final(tfm, out);
return 0;
}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
{
u32 flags;
if (tfm->__crt_alg->cra_digest.dia_setkey == NULL)
return -ENOSYS;
return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen, &flags);
crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
return -ENOSYS;
}
static void digest(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg, u8 *out)
static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
{
init(tfm);
update(tfm, sg, nsg);
final(tfm, out);
struct crypto_tfm *tfm = crypto_hash_tfm(hash);
crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
}
static int digest(struct hash_desc *desc,
struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
init(desc);
update(desc, sg, nbytes);
return final(desc, out);
}
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
@ -99,18 +178,22 @@ int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
int crypto_init_digest_ops(struct crypto_tfm *tfm)
{
struct digest_tfm *ops = &tfm->crt_digest;
struct hash_tfm *ops = &tfm->crt_hash;
struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
return -EINVAL;
ops->dit_init = init;
ops->dit_update = update;
ops->dit_final = final;
ops->dit_digest = digest;
ops->dit_setkey = setkey;
ops->init = init;
ops->update = update;
ops->final = final;
ops->digest = digest;
ops->setkey = dalg->dia_setkey ? setkey : nosetkey;
ops->digestsize = dalg->dia_digestsize;
return crypto_alloc_hmac_block(tfm);
return 0;
}
void crypto_exit_digest_ops(struct crypto_tfm *tfm)
{
crypto_free_hmac_block(tfm);
}

crypto/ecb.c Normal file

@ -0,0 +1,181 @@
/*
* ECB: Electronic CodeBook mode
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
struct crypto_ecb_ctx {
struct crypto_cipher *child;
};
static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent);
struct crypto_cipher *child = ctx->child;
int err;
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(child, key, keylen);
crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
static int crypto_ecb_crypt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk,
struct crypto_cipher *tfm,
void (*fn)(struct crypto_tfm *, u8 *, const u8 *))
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes;
int err;
err = blkcipher_walk_virt(desc, walk);
while ((nbytes = walk->nbytes)) {
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
do {
fn(crypto_cipher_tfm(tfm), wdst, wsrc);
wsrc += bsize;
wdst += bsize;
} while ((nbytes -= bsize) >= bsize);
err = blkcipher_walk_done(desc, walk, nbytes);
}
return err;
}
static int crypto_ecb_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
blkcipher_walk_init(&walk, dst, src, nbytes);
return crypto_ecb_crypt(desc, &walk, child,
crypto_cipher_alg(child)->cia_encrypt);
}
static int crypto_ecb_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct blkcipher_walk walk;
struct crypto_blkcipher *tfm = desc->tfm;
struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm);
struct crypto_cipher *child = ctx->child;
blkcipher_walk_init(&walk, dst, src, nbytes);
return crypto_ecb_crypt(desc, &walk, child,
crypto_cipher_alg(child)->cia_decrypt);
}
static int crypto_ecb_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
tfm = crypto_spawn_tfm(spawn);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
ctx->child = crypto_cipher_cast(tfm);
return 0;
}
static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static struct crypto_instance *crypto_ecb_alloc(void *param, unsigned int len)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
inst = crypto_alloc_instance("ecb", alg);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_blkcipher_type;
inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx);
inst->alg.cra_init = crypto_ecb_init_tfm;
inst->alg.cra_exit = crypto_ecb_exit_tfm;
inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey;
inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt;
inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static void crypto_ecb_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_template crypto_ecb_tmpl = {
.name = "ecb",
.alloc = crypto_ecb_alloc,
.free = crypto_ecb_free,
.module = THIS_MODULE,
};
static int __init crypto_ecb_module_init(void)
{
return crypto_register_template(&crypto_ecb_tmpl);
}
static void __exit crypto_ecb_module_exit(void)
{
crypto_unregister_template(&crypto_ecb_tmpl);
}
module_init(crypto_ecb_module_init);
module_exit(crypto_ecb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECB block cipher algorithm");

crypto/hash.c Normal file

@ -0,0 +1,61 @@
/*
* Cryptographic Hash operations.
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "internal.h"
static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
static int crypto_init_hash_ops(struct crypto_tfm *tfm)
{
struct hash_tfm *crt = &tfm->crt_hash;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
if (alg->digestsize > crypto_tfm_alg_blocksize(tfm))
return -EINVAL;
crt->init = alg->init;
crt->update = alg->update;
crt->final = alg->final;
crt->digest = alg->digest;
crt->setkey = alg->setkey;
crt->digestsize = alg->digestsize;
return 0;
}
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
__attribute_used__;
static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_printf(m, "type : hash\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
}
const struct crypto_type crypto_hash_type = {
.ctxsize = crypto_hash_ctxsize,
.init = crypto_init_hash_ops,
#ifdef CONFIG_PROC_FS
.show = crypto_hash_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_hash_type);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic cryptographic hash type");

crypto/hmac.c

@ -4,121 +4,249 @@
* HMAC: Keyed-Hashing for Message Authentication (RFC2104).
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* The HMAC implementation is derived from USAGI.
* Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include "internal.h"
#include <linux/slab.h>
#include <linux/string.h>
static void hash_key(struct crypto_tfm *tfm, u8 *key, unsigned int keylen)
struct hmac_ctx {
struct crypto_hash *child;
};
static inline void *align_ptr(void *p, unsigned int align)
{
struct scatterlist tmp;
sg_set_buf(&tmp, key, keylen);
crypto_digest_digest(tfm, &tmp, 1, key);
return (void *)ALIGN((unsigned long)p, align);
}
int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
static inline struct hmac_ctx *hmac_ctx(struct crypto_hash *tfm)
{
int ret = 0;
BUG_ON(!crypto_tfm_alg_blocksize(tfm));
tfm->crt_digest.dit_hmac_block = kmalloc(crypto_tfm_alg_blocksize(tfm),
GFP_KERNEL);
if (tfm->crt_digest.dit_hmac_block == NULL)
ret = -ENOMEM;
return ret;
return align_ptr(crypto_hash_ctx_aligned(tfm) +
crypto_hash_blocksize(tfm) * 2 +
crypto_hash_digestsize(tfm), sizeof(void *));
}
void crypto_free_hmac_block(struct crypto_tfm *tfm)
{
kfree(tfm->crt_digest.dit_hmac_block);
}
void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen)
static int hmac_setkey(struct crypto_hash *parent,
const u8 *inkey, unsigned int keylen)
{
int bs = crypto_hash_blocksize(parent);
int ds = crypto_hash_digestsize(parent);
char *ipad = crypto_hash_ctx_aligned(parent);
char *opad = ipad + bs;
char *digest = opad + bs;
struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
struct crypto_hash *tfm = ctx->child;
unsigned int i;
struct scatterlist tmp;
char *ipad = tfm->crt_digest.dit_hmac_block;
if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
hash_key(tfm, key, *keylen);
*keylen = crypto_tfm_alg_digestsize(tfm);
if (keylen > bs) {
struct hash_desc desc;
struct scatterlist tmp;
int err;
desc.tfm = tfm;
desc.flags = crypto_hash_get_flags(parent);
desc.flags &= CRYPTO_TFM_REQ_MAY_SLEEP;
sg_set_buf(&tmp, inkey, keylen);
err = crypto_hash_digest(&desc, &tmp, keylen, digest);
if (err)
return err;
inkey = digest;
keylen = ds;
}
memset(ipad, 0, crypto_tfm_alg_blocksize(tfm));
memcpy(ipad, key, *keylen);
memcpy(ipad, inkey, keylen);
memset(ipad + keylen, 0, bs - keylen);
memcpy(opad, ipad, bs);
for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
for (i = 0; i < bs; i++) {
ipad[i] ^= 0x36;
sg_set_buf(&tmp, ipad, crypto_tfm_alg_blocksize(tfm));
crypto_digest_init(tfm);
crypto_digest_update(tfm, &tmp, 1);
}
void crypto_hmac_update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
{
crypto_digest_update(tfm, sg, nsg);
}
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
unsigned int *keylen, u8 *out)
{
unsigned int i;
struct scatterlist tmp;
char *opad = tfm->crt_digest.dit_hmac_block;
if (*keylen > crypto_tfm_alg_blocksize(tfm)) {
hash_key(tfm, key, *keylen);
*keylen = crypto_tfm_alg_digestsize(tfm);
opad[i] ^= 0x5c;
}
crypto_digest_final(tfm, out);
memset(opad, 0, crypto_tfm_alg_blocksize(tfm));
memcpy(opad, key, *keylen);
for (i = 0; i < crypto_tfm_alg_blocksize(tfm); i++)
opad[i] ^= 0x5c;
sg_set_buf(&tmp, opad, crypto_tfm_alg_blocksize(tfm));
crypto_digest_init(tfm);
crypto_digest_update(tfm, &tmp, 1);
sg_set_buf(&tmp, out, crypto_tfm_alg_digestsize(tfm));
crypto_digest_update(tfm, &tmp, 1);
crypto_digest_final(tfm, out);
return 0;
}
void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
struct scatterlist *sg, unsigned int nsg, u8 *out)
static int hmac_init(struct hash_desc *pdesc)
{
crypto_hmac_init(tfm, key, keylen);
crypto_hmac_update(tfm, sg, nsg);
crypto_hmac_final(tfm, key, keylen, out);
struct crypto_hash *parent = pdesc->tfm;
int bs = crypto_hash_blocksize(parent);
int ds = crypto_hash_digestsize(parent);
char *ipad = crypto_hash_ctx_aligned(parent);
struct hmac_ctx *ctx = align_ptr(ipad + bs * 2 + ds, sizeof(void *));
struct hash_desc desc;
struct scatterlist tmp;
desc.tfm = ctx->child;
desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
sg_set_buf(&tmp, ipad, bs);
return unlikely(crypto_hash_init(&desc)) ?:
crypto_hash_update(&desc, &tmp, 1);
}
EXPORT_SYMBOL_GPL(crypto_hmac_init);
EXPORT_SYMBOL_GPL(crypto_hmac_update);
EXPORT_SYMBOL_GPL(crypto_hmac_final);
EXPORT_SYMBOL_GPL(crypto_hmac);
static int hmac_update(struct hash_desc *pdesc,
struct scatterlist *sg, unsigned int nbytes)
{
struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
struct hash_desc desc;
desc.tfm = ctx->child;
desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
return crypto_hash_update(&desc, sg, nbytes);
}
static int hmac_final(struct hash_desc *pdesc, u8 *out)
{
struct crypto_hash *parent = pdesc->tfm;
int bs = crypto_hash_blocksize(parent);
int ds = crypto_hash_digestsize(parent);
char *opad = crypto_hash_ctx_aligned(parent) + bs;
char *digest = opad + bs;
struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
struct hash_desc desc;
struct scatterlist tmp;
desc.tfm = ctx->child;
desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
sg_set_buf(&tmp, opad, bs + ds);
return unlikely(crypto_hash_final(&desc, digest)) ?:
crypto_hash_digest(&desc, &tmp, bs + ds, out);
}
static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
unsigned int nbytes, u8 *out)
{
struct crypto_hash *parent = pdesc->tfm;
int bs = crypto_hash_blocksize(parent);
int ds = crypto_hash_digestsize(parent);
char *ipad = crypto_hash_ctx_aligned(parent);
char *opad = ipad + bs;
char *digest = opad + bs;
struct hmac_ctx *ctx = align_ptr(digest + ds, sizeof(void *));
struct hash_desc desc;
struct scatterlist sg1[2];
struct scatterlist sg2[1];
desc.tfm = ctx->child;
desc.flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
sg_set_buf(sg1, ipad, bs);
sg1[1].page = (void *)sg;
sg1[1].length = 0;
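/* A zero-length entry whose page field carries a scatterlist pointer
 * chains sg1 onto the caller's sg, matching the sg_next() convention
 * introduced in scatterwalk.h below. */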
sg_set_buf(sg2, opad, bs + ds);
return unlikely(crypto_hash_digest(&desc, sg1, nbytes + bs, digest)) ?:
crypto_hash_digest(&desc, sg2, bs + ds, out);
}
static int hmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
tfm = crypto_spawn_tfm(spawn);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
ctx->child = crypto_hash_cast(tfm);
return 0;
}
static void hmac_exit_tfm(struct crypto_tfm *tfm)
{
struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
crypto_free_hash(ctx->child);
}
static void hmac_free(struct crypto_instance *inst)
{
crypto_drop_spawn(crypto_instance_ctx(inst));
kfree(inst);
}
static struct crypto_instance *hmac_alloc(void *param, unsigned int len)
{
struct crypto_instance *inst;
struct crypto_alg *alg;
alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK | CRYPTO_ALG_ASYNC);
if (IS_ERR(alg))
return ERR_PTR(PTR_ERR(alg));
inst = crypto_alloc_instance("hmac", alg);
if (IS_ERR(inst))
goto out_put_alg;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_HASH;
inst->alg.cra_priority = alg->cra_priority;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
inst->alg.cra_type = &crypto_hash_type;
inst->alg.cra_hash.digestsize =
(alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
alg->cra_digest.dia_digestsize;
inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
ALIGN(inst->alg.cra_blocksize * 2 +
inst->alg.cra_hash.digestsize,
sizeof(void *));
inst->alg.cra_init = hmac_init_tfm;
inst->alg.cra_exit = hmac_exit_tfm;
inst->alg.cra_hash.init = hmac_init;
inst->alg.cra_hash.update = hmac_update;
inst->alg.cra_hash.final = hmac_final;
inst->alg.cra_hash.digest = hmac_digest;
inst->alg.cra_hash.setkey = hmac_setkey;
out_put_alg:
crypto_mod_put(alg);
return inst;
}
static struct crypto_template hmac_tmpl = {
.name = "hmac",
.alloc = hmac_alloc,
.free = hmac_free,
.module = THIS_MODULE,
};
static int __init hmac_module_init(void)
{
return crypto_register_template(&hmac_tmpl);
}
static void __exit hmac_module_exit(void)
{
crypto_unregister_template(&hmac_tmpl);
}
module_init(hmac_module_init);
module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HMAC hash algorithm");

crypto/internal.h

@ -12,19 +12,43 @@
*/
#ifndef _CRYPTO_INTERNAL_H
#define _CRYPTO_INTERNAL_H
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <linux/completion.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>
/* Crypto notification events. */
enum {
CRYPTO_MSG_ALG_REQUEST,
CRYPTO_MSG_ALG_REGISTER,
CRYPTO_MSG_ALG_UNREGISTER,
CRYPTO_MSG_TMPL_REGISTER,
CRYPTO_MSG_TMPL_UNREGISTER,
};
struct crypto_instance;
struct crypto_template;
struct crypto_larval {
struct crypto_alg alg;
struct crypto_alg *adult;
struct completion completion;
u32 mask;
};
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
extern struct blocking_notifier_head crypto_chain;
extern enum km_type crypto_km_types[];
@ -43,36 +67,33 @@ static inline void crypto_kunmap(void *vaddr, int out)
kunmap_atomic(vaddr, crypto_kmap_type(out));
}
static inline void crypto_yield(struct crypto_tfm *tfm)
static inline void crypto_yield(u32 flags)
{
if (tfm->crt_flags & CRYPTO_TFM_REQ_MAY_SLEEP)
if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
cond_resched();
}
#ifdef CONFIG_CRYPTO_HMAC
int crypto_alloc_hmac_block(struct crypto_tfm *tfm);
void crypto_free_hmac_block(struct crypto_tfm *tfm);
#else
static inline int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
{
return 0;
}
static inline void crypto_free_hmac_block(struct crypto_tfm *tfm)
{ }
#endif
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
void __exit crypto_exit_proc(void);
#else
static inline void crypto_init_proc(void)
{ }
static inline void crypto_exit_proc(void)
{ }
#endif
static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg,
int flags)
{
return alg->cra_ctxsize;
unsigned int len = alg->cra_ctxsize;
if (alg->cra_alignmask) {
len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
len += alg->cra_digest.dia_digestsize;
}
return len;
}
static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg,
@ -96,6 +117,10 @@ static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg,
return alg->cra_ctxsize;
}
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
@ -108,5 +133,52 @@ void crypto_exit_digest_ops(struct crypto_tfm *tfm);
void crypto_exit_cipher_ops(struct crypto_tfm *tfm);
void crypto_exit_compress_ops(struct crypto_tfm *tfm);
void crypto_larval_error(const char *name, u32 type, u32 mask);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);
static inline void crypto_alg_put(struct crypto_alg *alg)
{
if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
alg->cra_destroy(alg);
}
static inline int crypto_tmpl_get(struct crypto_template *tmpl)
{
return try_module_get(tmpl->module);
}
static inline void crypto_tmpl_put(struct crypto_template *tmpl)
{
module_put(tmpl->module);
}
static inline int crypto_is_larval(struct crypto_alg *alg)
{
return alg->cra_flags & CRYPTO_ALG_LARVAL;
}
static inline int crypto_is_dead(struct crypto_alg *alg)
{
return alg->cra_flags & CRYPTO_ALG_DEAD;
}
static inline int crypto_is_moribund(struct crypto_alg *alg)
{
return alg->cra_flags & (CRYPTO_ALG_DEAD | CRYPTO_ALG_DYING);
}
static inline int crypto_notify(unsigned long val, void *v)
{
return blocking_notifier_call_chain(&crypto_chain, val, v);
}
#endif /* _CRYPTO_INTERNAL_H */

crypto/khazad.c

@ -755,19 +755,13 @@ static const u64 c[KHAZAD_ROUNDS + 1] = {
};
static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
if (key_len != 16)
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* key is supposed to be 32-bit aligned */
K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);

crypto/michael_mic.c

@ -123,14 +123,13 @@ static void michael_final(struct crypto_tfm *tfm, u8 *out)
static int michael_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
struct michael_mic_ctx *mctx = crypto_tfm_ctx(tfm);
const __le32 *data = (const __le32 *)key;
if (keylen != 8) {
if (flags)
*flags = CRYPTO_TFM_RES_BAD_KEY_LEN;
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}

crypto/proc.c

@ -12,6 +12,8 @@
* any later version.
*
*/
#include <asm/atomic.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/rwsem.h>
@ -54,6 +56,7 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "driver : %s\n", alg->cra_driver_name);
seq_printf(m, "module : %s\n", module_name(alg->cra_module));
seq_printf(m, "priority : %d\n", alg->cra_priority);
seq_printf(m, "refcnt : %d\n", atomic_read(&alg->cra_refcnt));
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_CIPHER:
@ -75,7 +78,10 @@ static int c_show(struct seq_file *m, void *p)
seq_printf(m, "type : compression\n");
break;
default:
seq_printf(m, "type : unknown\n");
if (alg->cra_type && alg->cra_type->show)
alg->cra_type->show(m, alg);
else
seq_printf(m, "type : unknown\n");
break;
}
@ -110,3 +116,8 @@ void __init crypto_init_proc(void)
if (proc)
proc->proc_fops = &proc_crypto_ops;
}
void __exit crypto_exit_proc(void)
{
remove_proc_entry("crypto", NULL);
}

crypto/scatterwalk.c

@ -15,9 +15,11 @@
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include <linux/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"
@ -27,88 +29,77 @@ enum km_type crypto_km_types[] = {
KM_SOFTIRQ0,
KM_SOFTIRQ1,
};
EXPORT_SYMBOL_GPL(crypto_km_types);
static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
{
if (out)
memcpy(sgdata, buf, nbytes);
else
memcpy(buf, sgdata, nbytes);
void *src = out ? buf : sgdata;
void *dst = out ? sgdata : buf;
memcpy(dst, src, nbytes);
}
void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
{
unsigned int rest_of_page;
walk->sg = sg;
walk->page = sg->page;
walk->len_this_segment = sg->length;
BUG_ON(!sg->length);
rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
walk->len_this_page = min(sg->length, rest_of_page);
walk->offset = sg->offset;
}
EXPORT_SYMBOL_GPL(scatterwalk_start);
void scatterwalk_map(struct scatter_walk *walk, int out)
void *scatterwalk_map(struct scatter_walk *walk, int out)
{
walk->data = crypto_kmap(walk->page, out) + walk->offset;
}
static inline void scatterwalk_unmap(struct scatter_walk *walk, int out)
{
/* walk->data may be pointing at the first byte of the next page;
however, we know we transferred at least one byte. So,
walk->data - 1 will be a virtual address in the mapped page. */
crypto_kunmap(walk->data - 1, out);
return crypto_kmap(scatterwalk_page(walk), out) +
offset_in_page(walk->offset);
}
EXPORT_SYMBOL_GPL(scatterwalk_map);
static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
unsigned int more)
{
if (out)
flush_dcache_page(walk->page);
flush_dcache_page(scatterwalk_page(walk));
if (more) {
walk->len_this_segment -= walk->len_this_page;
if (walk->len_this_segment) {
walk->page++;
walk->len_this_page = min(walk->len_this_segment,
(unsigned)PAGE_CACHE_SIZE);
walk->offset = 0;
}
else
walk->offset += PAGE_SIZE - 1;
walk->offset &= PAGE_MASK;
if (walk->offset >= walk->sg->offset + walk->sg->length)
scatterwalk_start(walk, sg_next(walk->sg));
}
}
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
scatterwalk_unmap(walk, out);
if (walk->len_this_page == 0 || !more)
if (!offset_in_page(walk->offset) || !more)
scatterwalk_pagedone(walk, out, more);
}
EXPORT_SYMBOL_GPL(scatterwalk_done);
/*
* Do not call this unless the total length of all of the fragments
* has been verified as a multiple of the block size.
*/
int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out)
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out)
{
while (nbytes > walk->len_this_page) {
memcpy_dir(buf, walk->data, walk->len_this_page, out);
buf += walk->len_this_page;
nbytes -= walk->len_this_page;
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(walk);
u8 *vaddr;
if (len_this_page > nbytes)
len_this_page = nbytes;
vaddr = scatterwalk_map(walk, out);
memcpy_dir(buf, vaddr, len_this_page, out);
scatterwalk_unmap(vaddr, out);
if (nbytes == len_this_page)
break;
buf += len_this_page;
nbytes -= len_this_page;
scatterwalk_unmap(walk, out);
scatterwalk_pagedone(walk, out, 1);
scatterwalk_map(walk, out);
}
memcpy_dir(buf, walk->data, nbytes, out);
return nbytes;
scatterwalk_advance(walk, nbytes);
}
EXPORT_SYMBOL_GPL(scatterwalk_copychunks);

crypto/scatterwalk.h

@ -14,45 +14,42 @@
#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <linux/scatterlist.h>
struct scatter_walk {
struct scatterlist *sg;
struct page *page;
void *data;
unsigned int len_this_page;
unsigned int len_this_segment;
unsigned int offset;
};
#include "internal.h"
/* Define sg_next as an inline routine for now in case we want to change
scatterlist to a linked list later. */
static inline struct scatterlist *sg_next(struct scatterlist *sg)
{
return sg + 1;
return (++sg)->length ? sg : (void *)sg->page;
}
static inline int scatterwalk_samebuf(struct scatter_walk *walk_in,
struct scatter_walk *walk_out)
static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
struct scatter_walk *walk_out)
{
return walk_in->page == walk_out->page &&
walk_in->offset == walk_out->offset;
return !(((walk_in->sg->page - walk_out->sg->page) << PAGE_SHIFT) +
(int)(walk_in->offset - walk_out->offset));
}
static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
return len_this_page > len ? len : len_this_page;
}
static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
unsigned int nbytes)
{
return nbytes > walk->len_this_page ? walk->len_this_page : nbytes;
unsigned int len_this_page = scatterwalk_pagelen(walk);
return nbytes > len_this_page ? len_this_page : nbytes;
}
static inline void scatterwalk_advance(struct scatter_walk *walk,
unsigned int nbytes)
{
walk->data += nbytes;
walk->offset += nbytes;
walk->len_this_page -= nbytes;
walk->len_this_segment -= nbytes;
}
static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
@ -61,9 +58,20 @@ static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
return !(walk->offset & alignmask);
}
static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
return walk->sg->page + (walk->offset >> PAGE_SHIFT);
}
static inline void scatterwalk_unmap(void *vaddr, int out)
{
crypto_kunmap(vaddr, out);
}
void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out);
void scatterwalk_map(struct scatter_walk *walk, int out);
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out);
void *scatterwalk_map(struct scatter_walk *walk, int out);
void scatterwalk_done(struct scatter_walk *walk, int out, int more);
#endif /* _CRYPTO_SCATTERWALK_H */
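scatterwalk_pagelen() relies on the identity offset_in_page(~off) + 1 == PAGE_SIZE - offset_in_page(off): complementing an offset flips its low bits, turning r into (PAGE_SIZE - 1) - r. A stand-alone check of that identity (PAGE_SIZE pinned to 4096 for illustration):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define offset_in_page(p) ((unsigned int)(p) & (PAGE_SIZE - 1))

int main(void)
{
        unsigned int off;

        for (off = 0; off < 2 * PAGE_SIZE; off++)
                assert(offset_in_page(~off) + 1 ==
                       PAGE_SIZE - offset_in_page(off));
        puts("identity holds");
        return 0;
}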

crypto/serpent.c

@ -216,7 +216,7 @@ struct serpent_ctx {
static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
struct serpent_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *k = ctx->expkey;
@ -224,13 +224,6 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
u32 r0,r1,r2,r3,r4;
int i;
if ((keylen < SERPENT_MIN_KEY_SIZE)
|| (keylen > SERPENT_MAX_KEY_SIZE))
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* Copy key, add padding */
for (i = 0; i < keylen; ++i)
@ -497,21 +490,15 @@ static struct crypto_alg serpent_alg = {
};
static int tnepres_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags)
unsigned int keylen)
{
u8 rev_key[SERPENT_MAX_KEY_SIZE];
int i;
if ((keylen < SERPENT_MIN_KEY_SIZE)
|| (keylen > SERPENT_MAX_KEY_SIZE)) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
for (i = 0; i < keylen; ++i)
rev_key[keylen - i - 1] = key[i];
return serpent_setkey(tfm, rev_key, keylen, flags);
return serpent_setkey(tfm, rev_key, keylen);
}
static void tnepres_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)

crypto/sha1.c

@ -109,6 +109,7 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out)
static struct crypto_alg alg = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sha1_ctx),
@ -137,3 +138,5 @@ module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
MODULE_ALIAS("sha1-generic");

crypto/sha256.c

@ -309,6 +309,7 @@ static void sha256_final(struct crypto_tfm *tfm, u8 *out)
static struct crypto_alg alg = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
.cra_flags = CRYPTO_ALG_TYPE_DIGEST,
.cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sha256_ctx),
@ -337,3 +338,5 @@ module_exit(fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm");
MODULE_ALIAS("sha256-generic");

File diff suppressed because it is too large

crypto/tcrypt.h

@ -28,7 +28,7 @@
struct hash_testvec {
/* only used with keyed hash algorithms */
char key[128] __attribute__ ((__aligned__(4)));
char plaintext[128];
char plaintext[240];
char digest[MAX_DIGEST_SIZE];
unsigned char tap[MAX_TAP];
unsigned char psize;
@ -36,16 +36,6 @@ struct hash_testvec {
unsigned char ksize;
};
struct hmac_testvec {
char key[128];
char plaintext[128];
char digest[MAX_DIGEST_SIZE];
unsigned char tap[MAX_TAP];
unsigned char ksize;
unsigned char psize;
unsigned char np;
};
struct cipher_testvec {
char key[MAX_KEYLEN] __attribute__ ((__aligned__(4)));
char iv[MAX_IVLEN];
@ -65,7 +55,7 @@ struct cipher_speed {
unsigned int blen;
};
struct digest_speed {
struct hash_speed {
unsigned int blen; /* buffer length */
unsigned int plen; /* per-update length */
};
@ -697,14 +687,13 @@ static struct hash_testvec tgr128_tv_template[] = {
},
};
#ifdef CONFIG_CRYPTO_HMAC
/*
* HMAC-MD5 test vectors from RFC2202
* (These need to be fixed to not use strlen).
*/
#define HMAC_MD5_TEST_VECTORS 7
static struct hmac_testvec hmac_md5_tv_template[] =
static struct hash_testvec hmac_md5_tv_template[] =
{
{
.key = { [0 ... 15] = 0x0b },
@ -768,7 +757,7 @@ static struct hmac_testvec hmac_md5_tv_template[] =
*/
#define HMAC_SHA1_TEST_VECTORS 7
static struct hmac_testvec hmac_sha1_tv_template[] = {
static struct hash_testvec hmac_sha1_tv_template[] = {
{
.key = { [0 ... 19] = 0x0b },
.ksize = 20,
@ -833,7 +822,7 @@ static struct hmac_testvec hmac_sha1_tv_template[] = {
*/
#define HMAC_SHA256_TEST_VECTORS 10
static struct hmac_testvec hmac_sha256_tv_template[] = {
static struct hash_testvec hmac_sha256_tv_template[] = {
{
.key = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
@ -944,8 +933,6 @@ static struct hmac_testvec hmac_sha256_tv_template[] = {
},
};
#endif /* CONFIG_CRYPTO_HMAC */
/*
* DES test vectors.
*/
@ -2896,6 +2883,183 @@ static struct hash_testvec michael_mic_tv_template[] = {
}
};
/*
* CRC32C test vectors
*/
#define CRC32C_TEST_VECTORS 14
static struct hash_testvec crc32c_tv_template[] = {
{
.psize = 0,
.digest = { 0x00, 0x00, 0x00, 0x00 }
},
{
.key = { 0x87, 0xa9, 0xcb, 0xed },
.ksize = 4,
.psize = 0,
.digest = { 0x78, 0x56, 0x34, 0x12 },
},
{
.key = { 0xff, 0xff, 0xff, 0xff },
.ksize = 4,
.plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28 },
.psize = 40,
.digest = { 0x7f, 0x15, 0x2c, 0x0e }
},
{
.key = { 0xff, 0xff, 0xff, 0xff },
.ksize = 4,
.plaintext = { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50 },
.psize = 40,
.digest = { 0xf6, 0xeb, 0x80, 0xe9 }
},
{
.key = { 0xff, 0xff, 0xff, 0xff },
.ksize = 4,
.plaintext = { 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78 },
.psize = 40,
.digest = { 0xed, 0xbd, 0x74, 0xde }
},
{
.key = { 0xff, 0xff, 0xff, 0xff },
.ksize = 4,
.plaintext = { 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80,
0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90,
0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0 },
.psize = 40,
.digest = { 0x62, 0xc8, 0x79, 0xd5 }
},
{
.key = { 0xff, 0xff, 0xff, 0xff },
.ksize = 4,
.plaintext = { 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8,
0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0,
0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8,
0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0,
0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8 },
.psize = 40,
.digest = { 0xd0, 0x9a, 0x97, 0xba }
},
{
.key = { 0xff, 0xff, 0xff, 0xff },
.ksize = 4,
.plaintext = { 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0,
0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0,
0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8,
0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 },
.psize = 40,
.digest = { 0x13, 0xd9, 0x29, 0x2b }
},
{
.key = { 0x80, 0xea, 0xd3, 0xf1 },
.ksize = 4,
.plaintext = { 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50 },
.psize = 40,
.digest = { 0x0c, 0xb5, 0xe2, 0xa2 }
},
{
.key = { 0xf3, 0x4a, 0x1d, 0x5d },
.ksize = 4,
.plaintext = { 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78 },
.psize = 40,
.digest = { 0xd1, 0x7f, 0xfb, 0xa6 }
},
{
.key = { 0x2e, 0x80, 0x04, 0x59 },
.ksize = 4,
.plaintext = { 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80,
0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90,
0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0 },
.psize = 40,
.digest = { 0x59, 0x33, 0xe6, 0x7a }
},
{
.key = { 0xa6, 0xcc, 0x19, 0x85 },
.ksize = 4,
.plaintext = { 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8,
0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0,
0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8,
0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0,
0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8 },
.psize = 40,
.digest = { 0xbe, 0x03, 0x01, 0xd2 }
},
{
.key = { 0x41, 0xfc, 0xfe, 0x2d },
.ksize = 4,
.plaintext = { 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0,
0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0,
0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8,
0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 },
.psize = 40,
.digest = { 0x75, 0xd3, 0xc5, 0x24 }
},
{
.key = { 0xff, 0xff, 0xff, 0xff },
.ksize = 4,
.plaintext = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40,
0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60,
0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80,
0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88,
0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90,
0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0,
0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8,
0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0,
0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8,
0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0,
0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8,
0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0,
0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8,
0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0,
0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8,
0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0 },
.psize = 240,
.digest = { 0x75, 0xd3, 0xc5, 0x24 },
.np = 2,
.tap = { 31, 209 }
},
};
/*
* Cipher speed tests
*/
@ -2983,7 +3147,7 @@ static struct cipher_speed des_speed_template[] = {
/*
* Digest speed tests
*/
static struct digest_speed generic_digest_speed_template[] = {
static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 16, .plen = 16, },
{ .blen = 64, .plen = 16, },
{ .blen = 64, .plen = 64, },

crypto/tea.c

@ -46,16 +46,10 @@ struct xtea_ctx {
};
static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
if (key_len != 16)
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = le32_to_cpu(key[1]);
@ -125,16 +119,10 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
}
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
if (key_len != 16)
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = le32_to_cpu(key[1]);

crypto/twofish.c

@ -39,6 +39,7 @@
*/
#include <asm/byteorder.h>
#include <crypto/twofish.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@ -46,534 +47,6 @@
#include <linux/crypto.h>
#include <linux/bitops.h>
/* The large precomputed tables for the Twofish cipher (twofish.c)
* Taken from the same source as twofish.c
* Marc Mutz <Marc@Mutz.com>
*/
/* These two tables are the q0 and q1 permutations, exactly as described in
* the Twofish paper. */
static const u8 q0[256] = {
0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78,
0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C,
0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30,
0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82,
0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE,
0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B,
0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45,
0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7,
0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF,
0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8,
0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED,
0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90,
0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B,
0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B,
0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F,
0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A,
0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17,
0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72,
0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68,
0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4,
0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42,
0x4A, 0x5E, 0xC1, 0xE0
};
static const u8 q1[256] = {
0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B,
0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1,
0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B,
0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5,
0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54,
0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96,
0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7,
0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8,
0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF,
0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9,
0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D,
0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E,
0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21,
0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01,
0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E,
0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64,
0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44,
0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E,
0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B,
0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9,
0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56,
0x55, 0x09, 0xBE, 0x91
};
/* These MDS tables are actually tables of MDS composed with q0 and q1,
* because it is only ever used that way and we can save some time by
* precomputing. Of course the main saving comes from precomputing the
* GF(2^8) multiplication involved in the MDS matrix multiply; by looking
* things up in these tables we reduce the matrix multiply to four lookups
* and three XORs. Semi-formally, the definition of these tables is:
* mds[0][i] = MDS (q1[i] 0 0 0)^T mds[1][i] = MDS (0 q0[i] 0 0)^T
* mds[2][i] = MDS (0 0 q1[i] 0)^T mds[3][i] = MDS (0 0 0 q0[i])^T
* where ^T means "transpose", the matrix multiply is performed in GF(2^8)
* represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described
* by Schneier et al, and I'm casually glossing over the byte/word
* conversion issues. */
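/* A minimal illustrative sketch (not part of the original file, and
 * glossing over the same byte/word details as the comment above): given
 * the four bytes b0..b3 of an input word, the composed tables below turn
 * the whole matrix multiply into
 *
 *	y = mds[0][b0] ^ mds[1][b1] ^ mds[2][b2] ^ mds[3][b3];
 *
 * which is the four lookups and three XORs just described. The CALC_K_2
 * macro further down is exactly this pattern, with the q0/q1
 * preprocessing folded into each table index. */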
static const u32 mds[4][256] = {
{0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B,
0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B,
0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32,
0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1,
0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA,
0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B,
0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1,
0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5,
0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490,
0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154,
0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0,
0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796,
0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228,
0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7,
0x3F3F2D24, 0xC0C0E346, 0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3,
0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8,
0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477,
0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF,
0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C,
0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9,
0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA,
0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D,
0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72,
0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E,
0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76,
0x8181942A, 0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321,
0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39,
0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01,
0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D,
0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E,
0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5,
0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64,
0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7,
0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544,
0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E,
0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E,
0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A,
0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B,
0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2,
0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9,
0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504,
0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756,
0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91},
{0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252,
0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A,
0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020,
0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141,
0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444,
0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424,
0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A,
0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757,
0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383,
0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A,
0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9,
0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656,
0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1,
0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898,
0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414,
0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3,
0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1,
0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989,
0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5,
0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282,
0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E,
0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E,
0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 0x7B8B0202,
0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC,
0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565,
0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A,
0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808,
0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272,
0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A,
0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969,
0xFCC3BDBD, 0x975CA3A3, 0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505,
0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5,
0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D,
0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343,
0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF,
0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3,
0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F,
0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646,
0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6,
0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF,
0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A,
0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7,
0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8},
{0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B,
0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F,
0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A,
0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783,
0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70,
0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 0x66AE6631, 0x3A5B3AA3,
0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB,
0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA,
0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4,
0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41,
0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C,
0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07,
0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622,
0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18,
0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035,
0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96,
0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84,
0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E,
0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F,
0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD,
0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558,
0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40,
0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA,
0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85,
0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF,
0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773,
0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D,
0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B,
0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C,
0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19,
0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086,
0xF298F2BE, 0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D,
0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74,
0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755,
0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691,
0xD1ACD187, 0xF415F44A, 0x8D598D15, 0xD6A8D682, 0xB90AB9BC, 0x429E420D,
0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4,
0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53,
0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E,
0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9,
0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705,
0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7,
0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF},
{0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98,
0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866,
0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643,
0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77,
0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9,
0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C,
0xA5E784A5, 0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3,
0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216,
0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F,
0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25,
0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF,
0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7,
0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4,
0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E,
0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA,
0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C,
0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12,
0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A,
0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D,
0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE,
0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A,
0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C,
0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B,
0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4,
0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B,
0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3,
0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE,
0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB,
0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85,
0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA,
0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E,
0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8,
0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33,
0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC,
0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718,
0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA,
0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8,
0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872,
0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882,
0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 0x8A93328A, 0x8DDFA48D,
0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 0x09D4D309, 0x108A5D10,
0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6,
0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8}
};
/* The exp_to_poly and poly_to_exp tables are used to perform efficient
* operations in GF(2^8) represented as GF(2)[x]/w(x) where
* w(x)=x^8+x^6+x^3+x^2+1. We care about doing that because it's part of the
* definition of the RS matrix in the key schedule. Elements of that field
* are polynomials of degree not greater than 7 and all coefficients 0 or 1,
* which can be represented naturally by bytes (just substitute x=2). In that
* form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8)
* multiplication is inefficient without hardware support. To multiply
* faster, I make use of the fact that x is a generator for the nonzero
* elements, so that every element p of GF(2)[x]/w(x) is either 0 or equal
* to (x)^n for some n in 0..254. Note that the caret here is exponentiation in GF(2^8),
* *not* polynomial notation. So if I want to compute pq where p and q are
* in GF(2^8), I can just say:
* 1. if p=0 or q=0 then pq=0
* 2. otherwise, find m and n such that p=x^m and q=x^n
* 3. pq=(x^m)(x^n)=x^(m+n), so add m and n and find pq
* The translations in steps 2 and 3 are looked up in the tables
* poly_to_exp (for step 2) and exp_to_poly (for step 3). To see this
* in action, look at the CALC_S macro. As additional wrinkles, note that
* one of my operands is always a constant, so the poly_to_exp lookup on it
* is done in advance; I included the original values in the comments so
* readers can have some chance of recognizing that this *is* the RS matrix
* from the Twofish paper. I've only included the table entries I actually
* need; I never do a lookup on a variable input of zero and the biggest
* exponents I'll ever see are 254 (variable) and 237 (constant), so they'll
* never sum to more than 491. I'm repeating part of the exp_to_poly table
* so that I don't have to do mod-255 reduction in the exponent arithmetic.
* Since I know my constant operands are never zero, I only have to worry
* about zero values in the variable operand, and I do it with a simple
* conditional branch. I know conditionals are expensive, but I couldn't
* see a non-horrible way of avoiding them, and I did manage to group the
* statements so that each if covers four group multiplications. */
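/* A minimal illustrative sketch of a fully general multiply built on the
 * two tables below (the helper name is hypothetical, and the cipher
 * itself never needs the mod-255 reduction, for the reasons given above):
 *
 *	static u8 gf_mul(u8 p, u8 q)
 *	{
 *		if (p == 0 || q == 0)
 *			return 0;
 *		return exp_to_poly[(poly_to_exp[p - 1] +
 *				    poly_to_exp[q - 1]) % 255];
 *	}
 */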
static const u8 poly_to_exp[255] = {
0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19,
0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A,
0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C,
0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B,
0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47,
0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D,
0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8,
0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C,
0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83,
0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48,
0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26,
0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E,
0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3,
0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9,
0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A,
0xC0, 0x23, 0x86, 0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D,
0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75,
0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84,
0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64,
0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49,
0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF,
0x85, 0xC8, 0xA1
};
static const u8 exp_to_poly[492] = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2,
0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03,
0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6,
0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A,
0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63,
0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C,
0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 0xF4, 0xA5, 0x07,
0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88,
0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12,
0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7,
0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C,
0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8,
0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25,
0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A,
0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE,
0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC,
0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E,
0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92,
0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 0xE2, 0x89,
0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB,
0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1,
0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D,
0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC,
0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3,
0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52,
0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0,
0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1,
0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A,
0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11,
0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51,
0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66,
0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB,
0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19,
0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D,
0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56,
0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE,
0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9,
0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE,
0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41,
0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E,
0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB
};
/* The table constants are indices of
* S-box entries, preprocessed through q0 and q1. */
static const u8 calc_sb_tbl[512] = {
0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4,
0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8,
0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B,
0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B,
0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD,
0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1,
0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B,
0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F,
0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B,
0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D,
0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E,
0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5,
0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14,
0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3,
0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54,
0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51,
0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A,
0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96,
0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10,
0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C,
0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7,
0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70,
0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB,
0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8,
0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF,
0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC,
0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF,
0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2,
0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82,
0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9,
0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97,
0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17,
0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D,
0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3,
0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C,
0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E,
0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F,
0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49,
0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21,
0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9,
0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD,
0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01,
0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F,
0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48,
0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E,
0xFC, 0x78, 0x97, 0x5C, 0x05, 0x58, 0x7A, 0x19,
0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57,
0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64,
0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE,
0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5,
0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44,
0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69,
0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15,
0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E,
0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34,
0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC,
0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B,
0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB,
0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52,
0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9,
0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4,
0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2,
0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56,
0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91
};
/* Macro to perform one column of the RS matrix multiplication. The
* parameters a, b, c, and d are the four bytes of output; i is the index
* into the key bytes, and w, x, y, and z are the column of constants from
* the RS matrix, preprocessed through the poly_to_exp table. */
#define CALC_S(a, b, c, d, i, w, x, y, z) \
if (key[i]) { \
tmp = poly_to_exp[key[i] - 1]; \
(a) ^= exp_to_poly[tmp + (w)]; \
(b) ^= exp_to_poly[tmp + (x)]; \
(c) ^= exp_to_poly[tmp + (y)]; \
(d) ^= exp_to_poly[tmp + (z)]; \
}
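/* Worked example (derived from the key setup below, not extra code): the
 * call CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D) multiplies
 * key[0] by the RS constants 01, A4, 02, A4 (whose exponents are 0x00,
 * 0x2D, 0x01, 0x2D) and XORs one GF(2^8) product into each of sa..sd,
 * e.g. the first accumulation expands to
 *
 *	sa ^= exp_to_poly[poly_to_exp[key[0] - 1] + 0x00];
 *
 * with all four updates skipped when key[0] == 0, since zero times
 * anything is zero in the field. */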
/* Macros to calculate the key-dependent S-boxes for a 128-bit key using
* the S vector from CALC_S. CALC_SB_2 computes a single entry in all
* four S-boxes, where i is the index of the entry to compute, and a and b
* are the index numbers preprocessed through the q0 and q1 tables
* respectively. */
#define CALC_SB_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \
ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \
ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \
ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh]
/* Macro exactly like CALC_SB_2, but for 192-bit keys. */
#define CALC_SB192_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \
ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \
ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl];
/* Macro exactly like CALC_SB_2, but for 256-bit keys. */
#define CALC_SB256_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \
ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \
ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp];
/* Macros to calculate the whitening and round subkeys. CALC_K_2 computes the
* last two stages of the h() function for a given index (either 2i or 2i+1).
* a, b, c, and d are the four bytes going into the last two stages. For
* 128-bit keys, this is the entire h() function and a and c are the index
* preprocessed through q0 and q1 respectively; for longer keys they are the
* output of previous stages. j is the index of the first key byte to use.
* CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2
* twice, doing the Pseudo-Hadamard Transform, and doing the necessary
* rotations. Its parameters are: a, the array to write the results into,
* j, the index of the first output entry, k and l, the preprocessed indices
* for index 2i, and m and n, the preprocessed indices for index 2i+1.
* CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an
* additional lookup-and-XOR stage. The parameters a, b, c and d are the
* four bytes going into the last three stages. For 192-bit keys, c = d
* are the index preprocessed through q0, and a = b are the index
* preprocessed through q1; j is the index of the first key byte to use.
* CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro
* instead of CALC_K_2.
* CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an
* additional lookup-and-XOR stage. The parameters a and b are the index
* preprocessed through q0 and q1 respectively; j is the index of the first
* key byte to use. CALC_K256 is identical to CALC_K but for using the
* CALC_K256_2 macro instead of CALC_K_2. */
#define CALC_K_2(a, b, c, d, j) \
mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \
^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \
^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \
^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]]
#define CALC_K(a, j, k, l, m, n) \
x = CALC_K_2 (k, l, k, l, 0); \
y = CALC_K_2 (m, n, m, n, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
#define CALC_K192_2(a, b, c, d, j) \
CALC_K_2 (q0[a ^ key[(j) + 16]], \
q1[b ^ key[(j) + 17]], \
q0[c ^ key[(j) + 18]], \
q1[d ^ key[(j) + 19]], j)
#define CALC_K192(a, j, k, l, m, n) \
x = CALC_K192_2 (l, l, k, k, 0); \
y = CALC_K192_2 (n, n, m, m, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
#define CALC_K256_2(a, b, j) \
CALC_K192_2 (q1[b ^ key[(j) + 24]], \
q1[a ^ key[(j) + 25]], \
q0[a ^ key[(j) + 26]], \
q0[b ^ key[(j) + 27]], j)
#define CALC_K256(a, j, k, l, m, n) \
x = CALC_K256_2 (k, l, 0); \
y = CALC_K256_2 (m, n, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
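/* The shared tail of CALC_K, CALC_K192 and CALC_K256 is the Twofish
 * pseudo-Hadamard transform spelled out as two additions: with A = x and
 * B = rol32(y, 8) being the two h() outputs, "x += y; y += x;" leaves
 * x = A + B and y = A + 2B, so the pair of subkeys stored is
 *
 *	K[2i]     = A + B
 *	K[2i + 1] = rol32(A + 2B, 9)
 *
 * matching the key schedule in the Twofish paper. */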
/* Macros to compute the g() function in the encryption and decryption
* rounds. G1 is the straight g() function; G2 includes the 8-bit
* rotation for the high 32-bit word. */
@@ -630,176 +103,7 @@ static const u8 calc_sb_tbl[512] = {
x ^= ctx->w[m]; \
dst[n] = cpu_to_le32(x)
#define TF_MIN_KEY_SIZE 16
#define TF_MAX_KEY_SIZE 32
#define TF_BLOCK_SIZE 16
/* Structure for an expanded Twofish key. s contains the key-dependent
* S-boxes composed with the MDS matrix; w contains the eight "whitening"
* subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note
* that k[i] corresponds to what the Twofish paper calls K[i+8]. */
struct twofish_ctx {
u32 s[4][256], w[8], k[32];
};
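/* For scale (a quick arithmetic check, not in the original): the
 * key-dependent S-boxes dominate the context, at
 *
 *	4 * 256 * sizeof(u32) + (8 + 32) * sizeof(u32) = 4096 + 160 = 4256
 *
 * bytes per transform, all of it reserved through .cra_ctxsize below. */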
/* Perform the key setup. */
static int twofish_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len, u32 *flags)
{
struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
int i, j, k;
/* Temporaries for CALC_K. */
u32 x, y;
/* The S vector used to key the S-boxes, split up into individual bytes.
* 128-bit keys use only sa through sh; 256-bit use all of them. */
u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0;
u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0;
/* Temporary for CALC_S. */
u8 tmp;
/* Check key length. */
if (key_len != 16 && key_len != 24 && key_len != 32) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL; /* unsupported key length */
}
/* Compute the first two words of the S vector. The magic numbers are
* the entries of the RS matrix, preprocessed through poly_to_exp. The
* numbers in the comments are the original (polynomial form) matrix
* entries. */
CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */
/* Calculate the third word of the S vector */
CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
}
if (key_len == 32) { /* 256-bit key */
/* Calculate the fourth word of the S vector */
CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* Calculate whitening and round subkeys. The constants are
* indices of subkeys, preprocessed through q0 and q1. */
CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71);
CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F);
CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F);
CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00);
CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
} else if (key_len == 24) { /* 192-bit key */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* Calculate whitening and round subkeys. The constants are
* indices of subkeys, preprocessed through q0 and q1. */
CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71);
CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F);
CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F);
CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00);
CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
} else { /* 128-bit key */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* Calculate whitening and round subkeys. The constants are
* indices of subkeys, preprocessed through q0 and q1. */
CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3);
CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B);
CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD);
CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71);
CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F);
CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B);
CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA);
CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F);
CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00);
CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
}
return 0;
}
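/* Illustrative usage sketch, assuming the single-block cipher interface
 * of this kernel release (not part of this file; error handling
 * abbreviated): once "twofish" is registered, a caller could encrypt one
 * block along these lines.
 *
 *	struct crypto_cipher *tfm;
 *	u8 key[16], in[TF_BLOCK_SIZE], out[TF_BLOCK_SIZE];
 *
 *	tfm = crypto_alloc_cipher("twofish", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_cipher_setkey(tfm, key, sizeof(key));
 *	crypto_cipher_encrypt_one(tfm, out, in);
 *	crypto_free_cipher(tfm);
 */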
/* Encrypt one block. in and out may be the same. */
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -877,6 +181,8 @@ static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static struct crypto_alg alg = {
.cra_name = "twofish",
.cra_driver_name = "twofish-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),

crypto/twofish_common.c Normal file
View File

@@ -0,0 +1,744 @@
/*
* Common Twofish algorithm parts shared between the c and assembler
* implementations
*
* Originally Twofish for GPG
* By Matthew Skala <mskala@ansuz.sooke.bc.ca>, July 26, 1998
* 256-bit key length added March 20, 1999
* Some modifications to reduce the text size by Werner Koch, April, 1998
* Ported to the kerneli patch by Marc Mutz <Marc@Mutz.com>
* Ported to CryptoAPI by Colin Slater <hoho@tacomeat.net>
*
* The original author has disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
* through http://www.counterpane.com/twofish.html
*
* For background information on multiplication in finite fields, used for
* the matrix operations in the key schedule, see the book _Contemporary
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
#include <crypto/twofish.h>
#include <linux/bitops.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
/* The large precomputed tables for the Twofish cipher (twofish.c)
* Taken from the same source as twofish.c
* Marc Mutz <Marc@Mutz.com>
*/
/* These two tables are the q0 and q1 permutations, exactly as described in
* the Twofish paper. */
static const u8 q0[256] = {
0xA9, 0x67, 0xB3, 0xE8, 0x04, 0xFD, 0xA3, 0x76, 0x9A, 0x92, 0x80, 0x78,
0xE4, 0xDD, 0xD1, 0x38, 0x0D, 0xC6, 0x35, 0x98, 0x18, 0xF7, 0xEC, 0x6C,
0x43, 0x75, 0x37, 0x26, 0xFA, 0x13, 0x94, 0x48, 0xF2, 0xD0, 0x8B, 0x30,
0x84, 0x54, 0xDF, 0x23, 0x19, 0x5B, 0x3D, 0x59, 0xF3, 0xAE, 0xA2, 0x82,
0x63, 0x01, 0x83, 0x2E, 0xD9, 0x51, 0x9B, 0x7C, 0xA6, 0xEB, 0xA5, 0xBE,
0x16, 0x0C, 0xE3, 0x61, 0xC0, 0x8C, 0x3A, 0xF5, 0x73, 0x2C, 0x25, 0x0B,
0xBB, 0x4E, 0x89, 0x6B, 0x53, 0x6A, 0xB4, 0xF1, 0xE1, 0xE6, 0xBD, 0x45,
0xE2, 0xF4, 0xB6, 0x66, 0xCC, 0x95, 0x03, 0x56, 0xD4, 0x1C, 0x1E, 0xD7,
0xFB, 0xC3, 0x8E, 0xB5, 0xE9, 0xCF, 0xBF, 0xBA, 0xEA, 0x77, 0x39, 0xAF,
0x33, 0xC9, 0x62, 0x71, 0x81, 0x79, 0x09, 0xAD, 0x24, 0xCD, 0xF9, 0xD8,
0xE5, 0xC5, 0xB9, 0x4D, 0x44, 0x08, 0x86, 0xE7, 0xA1, 0x1D, 0xAA, 0xED,
0x06, 0x70, 0xB2, 0xD2, 0x41, 0x7B, 0xA0, 0x11, 0x31, 0xC2, 0x27, 0x90,
0x20, 0xF6, 0x60, 0xFF, 0x96, 0x5C, 0xB1, 0xAB, 0x9E, 0x9C, 0x52, 0x1B,
0x5F, 0x93, 0x0A, 0xEF, 0x91, 0x85, 0x49, 0xEE, 0x2D, 0x4F, 0x8F, 0x3B,
0x47, 0x87, 0x6D, 0x46, 0xD6, 0x3E, 0x69, 0x64, 0x2A, 0xCE, 0xCB, 0x2F,
0xFC, 0x97, 0x05, 0x7A, 0xAC, 0x7F, 0xD5, 0x1A, 0x4B, 0x0E, 0xA7, 0x5A,
0x28, 0x14, 0x3F, 0x29, 0x88, 0x3C, 0x4C, 0x02, 0xB8, 0xDA, 0xB0, 0x17,
0x55, 0x1F, 0x8A, 0x7D, 0x57, 0xC7, 0x8D, 0x74, 0xB7, 0xC4, 0x9F, 0x72,
0x7E, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, 0x6E, 0x50, 0xDE, 0x68,
0x65, 0xBC, 0xDB, 0xF8, 0xC8, 0xA8, 0x2B, 0x40, 0xDC, 0xFE, 0x32, 0xA4,
0xCA, 0x10, 0x21, 0xF0, 0xD3, 0x5D, 0x0F, 0x00, 0x6F, 0x9D, 0x36, 0x42,
0x4A, 0x5E, 0xC1, 0xE0
};
static const u8 q1[256] = {
0x75, 0xF3, 0xC6, 0xF4, 0xDB, 0x7B, 0xFB, 0xC8, 0x4A, 0xD3, 0xE6, 0x6B,
0x45, 0x7D, 0xE8, 0x4B, 0xD6, 0x32, 0xD8, 0xFD, 0x37, 0x71, 0xF1, 0xE1,
0x30, 0x0F, 0xF8, 0x1B, 0x87, 0xFA, 0x06, 0x3F, 0x5E, 0xBA, 0xAE, 0x5B,
0x8A, 0x00, 0xBC, 0x9D, 0x6D, 0xC1, 0xB1, 0x0E, 0x80, 0x5D, 0xD2, 0xD5,
0xA0, 0x84, 0x07, 0x14, 0xB5, 0x90, 0x2C, 0xA3, 0xB2, 0x73, 0x4C, 0x54,
0x92, 0x74, 0x36, 0x51, 0x38, 0xB0, 0xBD, 0x5A, 0xFC, 0x60, 0x62, 0x96,
0x6C, 0x42, 0xF7, 0x10, 0x7C, 0x28, 0x27, 0x8C, 0x13, 0x95, 0x9C, 0xC7,
0x24, 0x46, 0x3B, 0x70, 0xCA, 0xE3, 0x85, 0xCB, 0x11, 0xD0, 0x93, 0xB8,
0xA6, 0x83, 0x20, 0xFF, 0x9F, 0x77, 0xC3, 0xCC, 0x03, 0x6F, 0x08, 0xBF,
0x40, 0xE7, 0x2B, 0xE2, 0x79, 0x0C, 0xAA, 0x82, 0x41, 0x3A, 0xEA, 0xB9,
0xE4, 0x9A, 0xA4, 0x97, 0x7E, 0xDA, 0x7A, 0x17, 0x66, 0x94, 0xA1, 0x1D,
0x3D, 0xF0, 0xDE, 0xB3, 0x0B, 0x72, 0xA7, 0x1C, 0xEF, 0xD1, 0x53, 0x3E,
0x8F, 0x33, 0x26, 0x5F, 0xEC, 0x76, 0x2A, 0x49, 0x81, 0x88, 0xEE, 0x21,
0xC4, 0x1A, 0xEB, 0xD9, 0xC5, 0x39, 0x99, 0xCD, 0xAD, 0x31, 0x8B, 0x01,
0x18, 0x23, 0xDD, 0x1F, 0x4E, 0x2D, 0xF9, 0x48, 0x4F, 0xF2, 0x65, 0x8E,
0x78, 0x5C, 0x58, 0x19, 0x8D, 0xE5, 0x98, 0x57, 0x67, 0x7F, 0x05, 0x64,
0xAF, 0x63, 0xB6, 0xFE, 0xF5, 0xB7, 0x3C, 0xA5, 0xCE, 0xE9, 0x68, 0x44,
0xE0, 0x4D, 0x43, 0x69, 0x29, 0x2E, 0xAC, 0x15, 0x59, 0xA8, 0x0A, 0x9E,
0x6E, 0x47, 0xDF, 0x34, 0x35, 0x6A, 0xCF, 0xDC, 0x22, 0xC9, 0xC0, 0x9B,
0x89, 0xD4, 0xED, 0xAB, 0x12, 0xA2, 0x0D, 0x52, 0xBB, 0x02, 0x2F, 0xA9,
0xD7, 0x61, 0x1E, 0xB4, 0x50, 0x04, 0xF6, 0xC2, 0x16, 0x25, 0x86, 0x56,
0x55, 0x09, 0xBE, 0x91
};
/* These MDS tables are actually tables of MDS composed with q0 and q1,
* because the MDS matrix is only ever used that way and we can save some time by
* precomputing. Of course the main saving comes from precomputing the
* GF(2^8) multiplication involved in the MDS matrix multiply; by looking
* things up in these tables we reduce the matrix multiply to four lookups
* and three XORs. Semi-formally, the definition of these tables is:
* mds[0][i] = MDS (q1[i] 0 0 0)^T mds[1][i] = MDS (0 q0[i] 0 0)^T
* mds[2][i] = MDS (0 0 q1[i] 0)^T mds[3][i] = MDS (0 0 0 q0[i])^T
* where ^T means "transpose", the matrix multiply is performed in GF(2^8)
* represented as GF(2)[x]/v(x) where v(x)=x^8+x^6+x^5+x^3+1 as described
* by Schneier et al, and I'm casually glossing over the byte/word
* conversion issues. */
static const u32 mds[4][256] = {
{
0xBCBC3275, 0xECEC21F3, 0x202043C6, 0xB3B3C9F4, 0xDADA03DB, 0x02028B7B,
0xE2E22BFB, 0x9E9EFAC8, 0xC9C9EC4A, 0xD4D409D3, 0x18186BE6, 0x1E1E9F6B,
0x98980E45, 0xB2B2387D, 0xA6A6D2E8, 0x2626B74B, 0x3C3C57D6, 0x93938A32,
0x8282EED8, 0x525298FD, 0x7B7BD437, 0xBBBB3771, 0x5B5B97F1, 0x474783E1,
0x24243C30, 0x5151E20F, 0xBABAC6F8, 0x4A4AF31B, 0xBFBF4887, 0x0D0D70FA,
0xB0B0B306, 0x7575DE3F, 0xD2D2FD5E, 0x7D7D20BA, 0x666631AE, 0x3A3AA35B,
0x59591C8A, 0x00000000, 0xCDCD93BC, 0x1A1AE09D, 0xAEAE2C6D, 0x7F7FABC1,
0x2B2BC7B1, 0xBEBEB90E, 0xE0E0A080, 0x8A8A105D, 0x3B3B52D2, 0x6464BAD5,
0xD8D888A0, 0xE7E7A584, 0x5F5FE807, 0x1B1B1114, 0x2C2CC2B5, 0xFCFCB490,
0x3131272C, 0x808065A3, 0x73732AB2, 0x0C0C8173, 0x79795F4C, 0x6B6B4154,
0x4B4B0292, 0x53536974, 0x94948F36, 0x83831F51, 0x2A2A3638, 0xC4C49CB0,
0x2222C8BD, 0xD5D5F85A, 0xBDBDC3FC, 0x48487860, 0xFFFFCE62, 0x4C4C0796,
0x4141776C, 0xC7C7E642, 0xEBEB24F7, 0x1C1C1410, 0x5D5D637C, 0x36362228,
0x6767C027, 0xE9E9AF8C, 0x4444F913, 0x1414EA95, 0xF5F5BB9C, 0xCFCF18C7,
0x3F3F2D24, 0xC0C0E346, 0x7272DB3B, 0x54546C70, 0x29294CCA, 0xF0F035E3,
0x0808FE85, 0xC6C617CB, 0xF3F34F11, 0x8C8CE4D0, 0xA4A45993, 0xCACA96B8,
0x68683BA6, 0xB8B84D83, 0x38382820, 0xE5E52EFF, 0xADAD569F, 0x0B0B8477,
0xC8C81DC3, 0x9999FFCC, 0x5858ED03, 0x19199A6F, 0x0E0E0A08, 0x95957EBF,
0x70705040, 0xF7F730E7, 0x6E6ECF2B, 0x1F1F6EE2, 0xB5B53D79, 0x09090F0C,
0x616134AA, 0x57571682, 0x9F9F0B41, 0x9D9D803A, 0x111164EA, 0x2525CDB9,
0xAFAFDDE4, 0x4545089A, 0xDFDF8DA4, 0xA3A35C97, 0xEAEAD57E, 0x353558DA,
0xEDEDD07A, 0x4343FC17, 0xF8F8CB66, 0xFBFBB194, 0x3737D3A1, 0xFAFA401D,
0xC2C2683D, 0xB4B4CCF0, 0x32325DDE, 0x9C9C71B3, 0x5656E70B, 0xE3E3DA72,
0x878760A7, 0x15151B1C, 0xF9F93AEF, 0x6363BFD1, 0x3434A953, 0x9A9A853E,
0xB1B1428F, 0x7C7CD133, 0x88889B26, 0x3D3DA65F, 0xA1A1D7EC, 0xE4E4DF76,
0x8181942A, 0x91910149, 0x0F0FFB81, 0xEEEEAA88, 0x161661EE, 0xD7D77321,
0x9797F5C4, 0xA5A5A81A, 0xFEFE3FEB, 0x6D6DB5D9, 0x7878AEC5, 0xC5C56D39,
0x1D1DE599, 0x7676A4CD, 0x3E3EDCAD, 0xCBCB6731, 0xB6B6478B, 0xEFEF5B01,
0x12121E18, 0x6060C523, 0x6A6AB0DD, 0x4D4DF61F, 0xCECEE94E, 0xDEDE7C2D,
0x55559DF9, 0x7E7E5A48, 0x2121B24F, 0x03037AF2, 0xA0A02665, 0x5E5E198E,
0x5A5A6678, 0x65654B5C, 0x62624E58, 0xFDFD4519, 0x0606F48D, 0x404086E5,
0xF2F2BE98, 0x3333AC57, 0x17179067, 0x05058E7F, 0xE8E85E05, 0x4F4F7D64,
0x89896AAF, 0x10109563, 0x74742FB6, 0x0A0A75FE, 0x5C5C92F5, 0x9B9B74B7,
0x2D2D333C, 0x3030D6A5, 0x2E2E49CE, 0x494989E9, 0x46467268, 0x77775544,
0xA8A8D8E0, 0x9696044D, 0x2828BD43, 0xA9A92969, 0xD9D97929, 0x8686912E,
0xD1D187AC, 0xF4F44A15, 0x8D8D1559, 0xD6D682A8, 0xB9B9BC0A, 0x42420D9E,
0xF6F6C16E, 0x2F2FB847, 0xDDDD06DF, 0x23233934, 0xCCCC6235, 0xF1F1C46A,
0xC1C112CF, 0x8585EBDC, 0x8F8F9E22, 0x7171A1C9, 0x9090F0C0, 0xAAAA539B,
0x0101F189, 0x8B8BE1D4, 0x4E4E8CED, 0x8E8E6FAB, 0xABABA212, 0x6F6F3EA2,
0xE6E6540D, 0xDBDBF252, 0x92927BBB, 0xB7B7B602, 0x6969CA2F, 0x3939D9A9,
0xD3D30CD7, 0xA7A72361, 0xA2A2AD1E, 0xC3C399B4, 0x6C6C4450, 0x07070504,
0x04047FF6, 0x272746C2, 0xACACA716, 0xD0D07625, 0x50501386, 0xDCDCF756,
0x84841A55, 0xE1E15109, 0x7A7A25BE, 0x1313EF91},
{
0xA9D93939, 0x67901717, 0xB3719C9C, 0xE8D2A6A6, 0x04050707, 0xFD985252,
0xA3658080, 0x76DFE4E4, 0x9A084545, 0x92024B4B, 0x80A0E0E0, 0x78665A5A,
0xE4DDAFAF, 0xDDB06A6A, 0xD1BF6363, 0x38362A2A, 0x0D54E6E6, 0xC6432020,
0x3562CCCC, 0x98BEF2F2, 0x181E1212, 0xF724EBEB, 0xECD7A1A1, 0x6C774141,
0x43BD2828, 0x7532BCBC, 0x37D47B7B, 0x269B8888, 0xFA700D0D, 0x13F94444,
0x94B1FBFB, 0x485A7E7E, 0xF27A0303, 0xD0E48C8C, 0x8B47B6B6, 0x303C2424,
0x84A5E7E7, 0x54416B6B, 0xDF06DDDD, 0x23C56060, 0x1945FDFD, 0x5BA33A3A,
0x3D68C2C2, 0x59158D8D, 0xF321ECEC, 0xAE316666, 0xA23E6F6F, 0x82165757,
0x63951010, 0x015BEFEF, 0x834DB8B8, 0x2E918686, 0xD9B56D6D, 0x511F8383,
0x9B53AAAA, 0x7C635D5D, 0xA63B6868, 0xEB3FFEFE, 0xA5D63030, 0xBE257A7A,
0x16A7ACAC, 0x0C0F0909, 0xE335F0F0, 0x6123A7A7, 0xC0F09090, 0x8CAFE9E9,
0x3A809D9D, 0xF5925C5C, 0x73810C0C, 0x2C273131, 0x2576D0D0, 0x0BE75656,
0xBB7B9292, 0x4EE9CECE, 0x89F10101, 0x6B9F1E1E, 0x53A93434, 0x6AC4F1F1,
0xB499C3C3, 0xF1975B5B, 0xE1834747, 0xE66B1818, 0xBDC82222, 0x450E9898,
0xE26E1F1F, 0xF4C9B3B3, 0xB62F7474, 0x66CBF8F8, 0xCCFF9999, 0x95EA1414,
0x03ED5858, 0x56F7DCDC, 0xD4E18B8B, 0x1C1B1515, 0x1EADA2A2, 0xD70CD3D3,
0xFB2BE2E2, 0xC31DC8C8, 0x8E195E5E, 0xB5C22C2C, 0xE9894949, 0xCF12C1C1,
0xBF7E9595, 0xBA207D7D, 0xEA641111, 0x77840B0B, 0x396DC5C5, 0xAF6A8989,
0x33D17C7C, 0xC9A17171, 0x62CEFFFF, 0x7137BBBB, 0x81FB0F0F, 0x793DB5B5,
0x0951E1E1, 0xADDC3E3E, 0x242D3F3F, 0xCDA47676, 0xF99D5555, 0xD8EE8282,
0xE5864040, 0xC5AE7878, 0xB9CD2525, 0x4D049696, 0x44557777, 0x080A0E0E,
0x86135050, 0xE730F7F7, 0xA1D33737, 0x1D40FAFA, 0xAA346161, 0xED8C4E4E,
0x06B3B0B0, 0x706C5454, 0xB22A7373, 0xD2523B3B, 0x410B9F9F, 0x7B8B0202,
0xA088D8D8, 0x114FF3F3, 0x3167CBCB, 0xC2462727, 0x27C06767, 0x90B4FCFC,
0x20283838, 0xF67F0404, 0x60784848, 0xFF2EE5E5, 0x96074C4C, 0x5C4B6565,
0xB1C72B2B, 0xAB6F8E8E, 0x9E0D4242, 0x9CBBF5F5, 0x52F2DBDB, 0x1BF34A4A,
0x5FA63D3D, 0x9359A4A4, 0x0ABCB9B9, 0xEF3AF9F9, 0x91EF1313, 0x85FE0808,
0x49019191, 0xEE611616, 0x2D7CDEDE, 0x4FB22121, 0x8F42B1B1, 0x3BDB7272,
0x47B82F2F, 0x8748BFBF, 0x6D2CAEAE, 0x46E3C0C0, 0xD6573C3C, 0x3E859A9A,
0x6929A9A9, 0x647D4F4F, 0x2A948181, 0xCE492E2E, 0xCB17C6C6, 0x2FCA6969,
0xFCC3BDBD, 0x975CA3A3, 0x055EE8E8, 0x7AD0EDED, 0xAC87D1D1, 0x7F8E0505,
0xD5BA6464, 0x1AA8A5A5, 0x4BB72626, 0x0EB9BEBE, 0xA7608787, 0x5AF8D5D5,
0x28223636, 0x14111B1B, 0x3FDE7575, 0x2979D9D9, 0x88AAEEEE, 0x3C332D2D,
0x4C5F7979, 0x02B6B7B7, 0xB896CACA, 0xDA583535, 0xB09CC4C4, 0x17FC4343,
0x551A8484, 0x1FF64D4D, 0x8A1C5959, 0x7D38B2B2, 0x57AC3333, 0xC718CFCF,
0x8DF40606, 0x74695353, 0xB7749B9B, 0xC4F59797, 0x9F56ADAD, 0x72DAE3E3,
0x7ED5EAEA, 0x154AF4F4, 0x229E8F8F, 0x12A2ABAB, 0x584E6262, 0x07E85F5F,
0x99E51D1D, 0x34392323, 0x6EC1F6F6, 0x50446C6C, 0xDE5D3232, 0x68724646,
0x6526A0A0, 0xBC93CDCD, 0xDB03DADA, 0xF8C6BABA, 0xC8FA9E9E, 0xA882D6D6,
0x2BCF6E6E, 0x40507070, 0xDCEB8585, 0xFE750A0A, 0x328A9393, 0xA48DDFDF,
0xCA4C2929, 0x10141C1C, 0x2173D7D7, 0xF0CCB4B4, 0xD309D4D4, 0x5D108A8A,
0x0FE25151, 0x00000000, 0x6F9A1919, 0x9DE01A1A, 0x368F9494, 0x42E6C7C7,
0x4AECC9C9, 0x5EFDD2D2, 0xC1AB7F7F, 0xE0D8A8A8},
{
0xBC75BC32, 0xECF3EC21, 0x20C62043, 0xB3F4B3C9, 0xDADBDA03, 0x027B028B,
0xE2FBE22B, 0x9EC89EFA, 0xC94AC9EC, 0xD4D3D409, 0x18E6186B, 0x1E6B1E9F,
0x9845980E, 0xB27DB238, 0xA6E8A6D2, 0x264B26B7, 0x3CD63C57, 0x9332938A,
0x82D882EE, 0x52FD5298, 0x7B377BD4, 0xBB71BB37, 0x5BF15B97, 0x47E14783,
0x2430243C, 0x510F51E2, 0xBAF8BAC6, 0x4A1B4AF3, 0xBF87BF48, 0x0DFA0D70,
0xB006B0B3, 0x753F75DE, 0xD25ED2FD, 0x7DBA7D20, 0x66AE6631, 0x3A5B3AA3,
0x598A591C, 0x00000000, 0xCDBCCD93, 0x1A9D1AE0, 0xAE6DAE2C, 0x7FC17FAB,
0x2BB12BC7, 0xBE0EBEB9, 0xE080E0A0, 0x8A5D8A10, 0x3BD23B52, 0x64D564BA,
0xD8A0D888, 0xE784E7A5, 0x5F075FE8, 0x1B141B11, 0x2CB52CC2, 0xFC90FCB4,
0x312C3127, 0x80A38065, 0x73B2732A, 0x0C730C81, 0x794C795F, 0x6B546B41,
0x4B924B02, 0x53745369, 0x9436948F, 0x8351831F, 0x2A382A36, 0xC4B0C49C,
0x22BD22C8, 0xD55AD5F8, 0xBDFCBDC3, 0x48604878, 0xFF62FFCE, 0x4C964C07,
0x416C4177, 0xC742C7E6, 0xEBF7EB24, 0x1C101C14, 0x5D7C5D63, 0x36283622,
0x672767C0, 0xE98CE9AF, 0x441344F9, 0x149514EA, 0xF59CF5BB, 0xCFC7CF18,
0x3F243F2D, 0xC046C0E3, 0x723B72DB, 0x5470546C, 0x29CA294C, 0xF0E3F035,
0x088508FE, 0xC6CBC617, 0xF311F34F, 0x8CD08CE4, 0xA493A459, 0xCAB8CA96,
0x68A6683B, 0xB883B84D, 0x38203828, 0xE5FFE52E, 0xAD9FAD56, 0x0B770B84,
0xC8C3C81D, 0x99CC99FF, 0x580358ED, 0x196F199A, 0x0E080E0A, 0x95BF957E,
0x70407050, 0xF7E7F730, 0x6E2B6ECF, 0x1FE21F6E, 0xB579B53D, 0x090C090F,
0x61AA6134, 0x57825716, 0x9F419F0B, 0x9D3A9D80, 0x11EA1164, 0x25B925CD,
0xAFE4AFDD, 0x459A4508, 0xDFA4DF8D, 0xA397A35C, 0xEA7EEAD5, 0x35DA3558,
0xED7AEDD0, 0x431743FC, 0xF866F8CB, 0xFB94FBB1, 0x37A137D3, 0xFA1DFA40,
0xC23DC268, 0xB4F0B4CC, 0x32DE325D, 0x9CB39C71, 0x560B56E7, 0xE372E3DA,
0x87A78760, 0x151C151B, 0xF9EFF93A, 0x63D163BF, 0x345334A9, 0x9A3E9A85,
0xB18FB142, 0x7C337CD1, 0x8826889B, 0x3D5F3DA6, 0xA1ECA1D7, 0xE476E4DF,
0x812A8194, 0x91499101, 0x0F810FFB, 0xEE88EEAA, 0x16EE1661, 0xD721D773,
0x97C497F5, 0xA51AA5A8, 0xFEEBFE3F, 0x6DD96DB5, 0x78C578AE, 0xC539C56D,
0x1D991DE5, 0x76CD76A4, 0x3EAD3EDC, 0xCB31CB67, 0xB68BB647, 0xEF01EF5B,
0x1218121E, 0x602360C5, 0x6ADD6AB0, 0x4D1F4DF6, 0xCE4ECEE9, 0xDE2DDE7C,
0x55F9559D, 0x7E487E5A, 0x214F21B2, 0x03F2037A, 0xA065A026, 0x5E8E5E19,
0x5A785A66, 0x655C654B, 0x6258624E, 0xFD19FD45, 0x068D06F4, 0x40E54086,
0xF298F2BE, 0x335733AC, 0x17671790, 0x057F058E, 0xE805E85E, 0x4F644F7D,
0x89AF896A, 0x10631095, 0x74B6742F, 0x0AFE0A75, 0x5CF55C92, 0x9BB79B74,
0x2D3C2D33, 0x30A530D6, 0x2ECE2E49, 0x49E94989, 0x46684672, 0x77447755,
0xA8E0A8D8, 0x964D9604, 0x284328BD, 0xA969A929, 0xD929D979, 0x862E8691,
0xD1ACD187, 0xF415F44A, 0x8D598D15, 0xD6A8D682, 0xB90AB9BC, 0x429E420D,
0xF66EF6C1, 0x2F472FB8, 0xDDDFDD06, 0x23342339, 0xCC35CC62, 0xF16AF1C4,
0xC1CFC112, 0x85DC85EB, 0x8F228F9E, 0x71C971A1, 0x90C090F0, 0xAA9BAA53,
0x018901F1, 0x8BD48BE1, 0x4EED4E8C, 0x8EAB8E6F, 0xAB12ABA2, 0x6FA26F3E,
0xE60DE654, 0xDB52DBF2, 0x92BB927B, 0xB702B7B6, 0x692F69CA, 0x39A939D9,
0xD3D7D30C, 0xA761A723, 0xA21EA2AD, 0xC3B4C399, 0x6C506C44, 0x07040705,
0x04F6047F, 0x27C22746, 0xAC16ACA7, 0xD025D076, 0x50865013, 0xDC56DCF7,
0x8455841A, 0xE109E151, 0x7ABE7A25, 0x139113EF},
{
0xD939A9D9, 0x90176790, 0x719CB371, 0xD2A6E8D2, 0x05070405, 0x9852FD98,
0x6580A365, 0xDFE476DF, 0x08459A08, 0x024B9202, 0xA0E080A0, 0x665A7866,
0xDDAFE4DD, 0xB06ADDB0, 0xBF63D1BF, 0x362A3836, 0x54E60D54, 0x4320C643,
0x62CC3562, 0xBEF298BE, 0x1E12181E, 0x24EBF724, 0xD7A1ECD7, 0x77416C77,
0xBD2843BD, 0x32BC7532, 0xD47B37D4, 0x9B88269B, 0x700DFA70, 0xF94413F9,
0xB1FB94B1, 0x5A7E485A, 0x7A03F27A, 0xE48CD0E4, 0x47B68B47, 0x3C24303C,
0xA5E784A5, 0x416B5441, 0x06DDDF06, 0xC56023C5, 0x45FD1945, 0xA33A5BA3,
0x68C23D68, 0x158D5915, 0x21ECF321, 0x3166AE31, 0x3E6FA23E, 0x16578216,
0x95106395, 0x5BEF015B, 0x4DB8834D, 0x91862E91, 0xB56DD9B5, 0x1F83511F,
0x53AA9B53, 0x635D7C63, 0x3B68A63B, 0x3FFEEB3F, 0xD630A5D6, 0x257ABE25,
0xA7AC16A7, 0x0F090C0F, 0x35F0E335, 0x23A76123, 0xF090C0F0, 0xAFE98CAF,
0x809D3A80, 0x925CF592, 0x810C7381, 0x27312C27, 0x76D02576, 0xE7560BE7,
0x7B92BB7B, 0xE9CE4EE9, 0xF10189F1, 0x9F1E6B9F, 0xA93453A9, 0xC4F16AC4,
0x99C3B499, 0x975BF197, 0x8347E183, 0x6B18E66B, 0xC822BDC8, 0x0E98450E,
0x6E1FE26E, 0xC9B3F4C9, 0x2F74B62F, 0xCBF866CB, 0xFF99CCFF, 0xEA1495EA,
0xED5803ED, 0xF7DC56F7, 0xE18BD4E1, 0x1B151C1B, 0xADA21EAD, 0x0CD3D70C,
0x2BE2FB2B, 0x1DC8C31D, 0x195E8E19, 0xC22CB5C2, 0x8949E989, 0x12C1CF12,
0x7E95BF7E, 0x207DBA20, 0x6411EA64, 0x840B7784, 0x6DC5396D, 0x6A89AF6A,
0xD17C33D1, 0xA171C9A1, 0xCEFF62CE, 0x37BB7137, 0xFB0F81FB, 0x3DB5793D,
0x51E10951, 0xDC3EADDC, 0x2D3F242D, 0xA476CDA4, 0x9D55F99D, 0xEE82D8EE,
0x8640E586, 0xAE78C5AE, 0xCD25B9CD, 0x04964D04, 0x55774455, 0x0A0E080A,
0x13508613, 0x30F7E730, 0xD337A1D3, 0x40FA1D40, 0x3461AA34, 0x8C4EED8C,
0xB3B006B3, 0x6C54706C, 0x2A73B22A, 0x523BD252, 0x0B9F410B, 0x8B027B8B,
0x88D8A088, 0x4FF3114F, 0x67CB3167, 0x4627C246, 0xC06727C0, 0xB4FC90B4,
0x28382028, 0x7F04F67F, 0x78486078, 0x2EE5FF2E, 0x074C9607, 0x4B655C4B,
0xC72BB1C7, 0x6F8EAB6F, 0x0D429E0D, 0xBBF59CBB, 0xF2DB52F2, 0xF34A1BF3,
0xA63D5FA6, 0x59A49359, 0xBCB90ABC, 0x3AF9EF3A, 0xEF1391EF, 0xFE0885FE,
0x01914901, 0x6116EE61, 0x7CDE2D7C, 0xB2214FB2, 0x42B18F42, 0xDB723BDB,
0xB82F47B8, 0x48BF8748, 0x2CAE6D2C, 0xE3C046E3, 0x573CD657, 0x859A3E85,
0x29A96929, 0x7D4F647D, 0x94812A94, 0x492ECE49, 0x17C6CB17, 0xCA692FCA,
0xC3BDFCC3, 0x5CA3975C, 0x5EE8055E, 0xD0ED7AD0, 0x87D1AC87, 0x8E057F8E,
0xBA64D5BA, 0xA8A51AA8, 0xB7264BB7, 0xB9BE0EB9, 0x6087A760, 0xF8D55AF8,
0x22362822, 0x111B1411, 0xDE753FDE, 0x79D92979, 0xAAEE88AA, 0x332D3C33,
0x5F794C5F, 0xB6B702B6, 0x96CAB896, 0x5835DA58, 0x9CC4B09C, 0xFC4317FC,
0x1A84551A, 0xF64D1FF6, 0x1C598A1C, 0x38B27D38, 0xAC3357AC, 0x18CFC718,
0xF4068DF4, 0x69537469, 0x749BB774, 0xF597C4F5, 0x56AD9F56, 0xDAE372DA,
0xD5EA7ED5, 0x4AF4154A, 0x9E8F229E, 0xA2AB12A2, 0x4E62584E, 0xE85F07E8,
0xE51D99E5, 0x39233439, 0xC1F66EC1, 0x446C5044, 0x5D32DE5D, 0x72466872,
0x26A06526, 0x93CDBC93, 0x03DADB03, 0xC6BAF8C6, 0xFA9EC8FA, 0x82D6A882,
0xCF6E2BCF, 0x50704050, 0xEB85DCEB, 0x750AFE75, 0x8A93328A, 0x8DDFA48D,
0x4C29CA4C, 0x141C1014, 0x73D72173, 0xCCB4F0CC, 0x09D4D309, 0x108A5D10,
0xE2510FE2, 0x00000000, 0x9A196F9A, 0xE01A9DE0, 0x8F94368F, 0xE6C742E6,
0xECC94AEC, 0xFDD25EFD, 0xAB7FC1AB, 0xD8A8E0D8}
};
/* The exp_to_poly and poly_to_exp tables are used to perform efficient
* operations in GF(2^8) represented as GF(2)[x]/w(x) where
* w(x)=x^8+x^6+x^3+x^2+1. We care about doing that because it's part of the
* definition of the RS matrix in the key schedule. Elements of that field
* are polynomials of degree not greater than 7 and all coefficients 0 or 1,
* which can be represented naturally by bytes (just substitute x=2). In that
* form, GF(2^8) addition is the same as bitwise XOR, but GF(2^8)
* multiplication is inefficient without hardware support. To multiply
* faster, I make use of the fact that x is a generator for the nonzero
* elements, so that every element p of GF(2)[x]/w(x) is either 0 or equal
* to (x)^n for some n in 0..254. Note that the caret here is exponentiation in GF(2^8),
* *not* polynomial notation. So if I want to compute pq where p and q are
* in GF(2^8), I can just say:
* 1. if p=0 or q=0 then pq=0
* 2. otherwise, find m and n such that p=x^m and q=x^n
* 3. pq=(x^m)(x^n)=x^(m+n), so add m and n and find pq
* The translations in steps 2 and 3 are looked up in the tables
* poly_to_exp (for step 2) and exp_to_poly (for step 3). To see this
* in action, look at the CALC_S macro. As additional wrinkles, note that
* one of my operands is always a constant, so the poly_to_exp lookup on it
* is done in advance; I included the original values in the comments so
* readers can have some chance of recognizing that this *is* the RS matrix
* from the Twofish paper. I've only included the table entries I actually
* need; I never do a lookup on a variable input of zero and the biggest
* exponents I'll ever see are 254 (variable) and 237 (constant), so they'll
* never sum to more than 491. I'm repeating part of the exp_to_poly table
* so that I don't have to do mod-255 reduction in the exponent arithmetic.
* Since I know my constant operands are never zero, I only have to worry
* about zero values in the variable operand, and I do it with a simple
* conditional branch. I know conditionals are expensive, but I couldn't
* see a non-horrible way of avoiding them, and I did manage to group the
* statements so that each if covers four group multiplications. */
static const u8 poly_to_exp[255] = {
0x00, 0x01, 0x17, 0x02, 0x2E, 0x18, 0x53, 0x03, 0x6A, 0x2F, 0x93, 0x19,
0x34, 0x54, 0x45, 0x04, 0x5C, 0x6B, 0xB6, 0x30, 0xA6, 0x94, 0x4B, 0x1A,
0x8C, 0x35, 0x81, 0x55, 0xAA, 0x46, 0x0D, 0x05, 0x24, 0x5D, 0x87, 0x6C,
0x9B, 0xB7, 0xC1, 0x31, 0x2B, 0xA7, 0xA3, 0x95, 0x98, 0x4C, 0xCA, 0x1B,
0xE6, 0x8D, 0x73, 0x36, 0xCD, 0x82, 0x12, 0x56, 0x62, 0xAB, 0xF0, 0x47,
0x4F, 0x0E, 0xBD, 0x06, 0xD4, 0x25, 0xD2, 0x5E, 0x27, 0x88, 0x66, 0x6D,
0xD6, 0x9C, 0x79, 0xB8, 0x08, 0xC2, 0xDF, 0x32, 0x68, 0x2C, 0xFD, 0xA8,
0x8A, 0xA4, 0x5A, 0x96, 0x29, 0x99, 0x22, 0x4D, 0x60, 0xCB, 0xE4, 0x1C,
0x7B, 0xE7, 0x3B, 0x8E, 0x9E, 0x74, 0xF4, 0x37, 0xD8, 0xCE, 0xF9, 0x83,
0x6F, 0x13, 0xB2, 0x57, 0xE1, 0x63, 0xDC, 0xAC, 0xC4, 0xF1, 0xAF, 0x48,
0x0A, 0x50, 0x42, 0x0F, 0xBA, 0xBE, 0xC7, 0x07, 0xDE, 0xD5, 0x78, 0x26,
0x65, 0xD3, 0xD1, 0x5F, 0xE3, 0x28, 0x21, 0x89, 0x59, 0x67, 0xFC, 0x6E,
0xB1, 0xD7, 0xF8, 0x9D, 0xF3, 0x7A, 0x3A, 0xB9, 0xC6, 0x09, 0x41, 0xC3,
0xAE, 0xE0, 0xDB, 0x33, 0x44, 0x69, 0x92, 0x2D, 0x52, 0xFE, 0x16, 0xA9,
0x0C, 0x8B, 0x80, 0xA5, 0x4A, 0x5B, 0xB5, 0x97, 0xC9, 0x2A, 0xA2, 0x9A,
0xC0, 0x23, 0x86, 0x4E, 0xBC, 0x61, 0xEF, 0xCC, 0x11, 0xE5, 0x72, 0x1D,
0x3D, 0x7C, 0xEB, 0xE8, 0xE9, 0x3C, 0xEA, 0x8F, 0x7D, 0x9F, 0xEC, 0x75,
0x1E, 0xF5, 0x3E, 0x38, 0xF6, 0xD9, 0x3F, 0xCF, 0x76, 0xFA, 0x1F, 0x84,
0xA0, 0x70, 0xED, 0x14, 0x90, 0xB3, 0x7E, 0x58, 0xFB, 0xE2, 0x20, 0x64,
0xD0, 0xDD, 0x77, 0xAD, 0xDA, 0xC5, 0x40, 0xF2, 0x39, 0xB0, 0xF7, 0x49,
0xB4, 0x0B, 0x7F, 0x51, 0x15, 0x43, 0x91, 0x10, 0x71, 0xBB, 0xEE, 0xBF,
0x85, 0xC8, 0xA1
};
static const u8 exp_to_poly[492] = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D, 0x9A, 0x79, 0xF2,
0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC, 0xF5, 0xA7, 0x03,
0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3, 0x8B, 0x5B, 0xB6,
0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52, 0xA4, 0x05, 0x0A,
0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xED, 0x97, 0x63,
0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1, 0x0F, 0x1E, 0x3C,
0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A, 0xF4, 0xA5, 0x07,
0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11, 0x22, 0x44, 0x88,
0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51, 0xA2, 0x09, 0x12,
0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66, 0xCC, 0xD5, 0xE7,
0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB, 0x1B, 0x36, 0x6C,
0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19, 0x32, 0x64, 0xC8,
0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D, 0x5A, 0xB4, 0x25,
0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56, 0xAC, 0x15, 0x2A,
0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE, 0x91, 0x6F, 0xDE,
0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9, 0x3F, 0x7E, 0xFC,
0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE, 0xB1, 0x2F, 0x5E,
0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41, 0x82, 0x49, 0x92,
0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E, 0x71, 0xE2, 0x89,
0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB, 0xDB, 0xFB, 0xBB,
0x3B, 0x76, 0xEC, 0x95, 0x67, 0xCE, 0xD1, 0xEF, 0x93, 0x6B, 0xD6, 0xE1,
0x8F, 0x53, 0xA6, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x4D,
0x9A, 0x79, 0xF2, 0xA9, 0x1F, 0x3E, 0x7C, 0xF8, 0xBD, 0x37, 0x6E, 0xDC,
0xF5, 0xA7, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0xCD, 0xD7, 0xE3,
0x8B, 0x5B, 0xB6, 0x21, 0x42, 0x84, 0x45, 0x8A, 0x59, 0xB2, 0x29, 0x52,
0xA4, 0x05, 0x0A, 0x14, 0x28, 0x50, 0xA0, 0x0D, 0x1A, 0x34, 0x68, 0xD0,
0xED, 0x97, 0x63, 0xC6, 0xC1, 0xCF, 0xD3, 0xEB, 0x9B, 0x7B, 0xF6, 0xA1,
0x0F, 0x1E, 0x3C, 0x78, 0xF0, 0xAD, 0x17, 0x2E, 0x5C, 0xB8, 0x3D, 0x7A,
0xF4, 0xA5, 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0x8D, 0x57, 0xAE, 0x11,
0x22, 0x44, 0x88, 0x5D, 0xBA, 0x39, 0x72, 0xE4, 0x85, 0x47, 0x8E, 0x51,
0xA2, 0x09, 0x12, 0x24, 0x48, 0x90, 0x6D, 0xDA, 0xF9, 0xBF, 0x33, 0x66,
0xCC, 0xD5, 0xE7, 0x83, 0x4B, 0x96, 0x61, 0xC2, 0xC9, 0xDF, 0xF3, 0xAB,
0x1B, 0x36, 0x6C, 0xD8, 0xFD, 0xB7, 0x23, 0x46, 0x8C, 0x55, 0xAA, 0x19,
0x32, 0x64, 0xC8, 0xDD, 0xF7, 0xA3, 0x0B, 0x16, 0x2C, 0x58, 0xB0, 0x2D,
0x5A, 0xB4, 0x25, 0x4A, 0x94, 0x65, 0xCA, 0xD9, 0xFF, 0xB3, 0x2B, 0x56,
0xAC, 0x15, 0x2A, 0x54, 0xA8, 0x1D, 0x3A, 0x74, 0xE8, 0x9D, 0x77, 0xEE,
0x91, 0x6F, 0xDE, 0xF1, 0xAF, 0x13, 0x26, 0x4C, 0x98, 0x7D, 0xFA, 0xB9,
0x3F, 0x7E, 0xFC, 0xB5, 0x27, 0x4E, 0x9C, 0x75, 0xEA, 0x99, 0x7F, 0xFE,
0xB1, 0x2F, 0x5E, 0xBC, 0x35, 0x6A, 0xD4, 0xE5, 0x87, 0x43, 0x86, 0x41,
0x82, 0x49, 0x92, 0x69, 0xD2, 0xE9, 0x9F, 0x73, 0xE6, 0x81, 0x4F, 0x9E,
0x71, 0xE2, 0x89, 0x5F, 0xBE, 0x31, 0x62, 0xC4, 0xC5, 0xC7, 0xC3, 0xCB
};
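/* A minimal sketch (not part of the kernel source) of a general GF(2^8)
 * multiply built from the two tables above. Unlike the key schedule below,
 * a general routine can see two variable operands, so it needs the mod-255
 * reduction that the small preprocessed constants let the kernel code skip: */
static u8 gf256_mul(u8 p, u8 q)
{
	if (!p || !q)
		return 0;	/* zero has no discrete logarithm */
	/* poly_to_exp is indexed by p - 1 because the zero element is
	 * omitted; entries 0..254 of exp_to_poly cover one full period. */
	return exp_to_poly[(poly_to_exp[p - 1] + poly_to_exp[q - 1]) % 255];
}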
/* The table constants are indices of
* S-box entries, preprocessed through q0 and q1. */
static const u8 calc_sb_tbl[512] = {
0xA9, 0x75, 0x67, 0xF3, 0xB3, 0xC6, 0xE8, 0xF4,
0x04, 0xDB, 0xFD, 0x7B, 0xA3, 0xFB, 0x76, 0xC8,
0x9A, 0x4A, 0x92, 0xD3, 0x80, 0xE6, 0x78, 0x6B,
0xE4, 0x45, 0xDD, 0x7D, 0xD1, 0xE8, 0x38, 0x4B,
0x0D, 0xD6, 0xC6, 0x32, 0x35, 0xD8, 0x98, 0xFD,
0x18, 0x37, 0xF7, 0x71, 0xEC, 0xF1, 0x6C, 0xE1,
0x43, 0x30, 0x75, 0x0F, 0x37, 0xF8, 0x26, 0x1B,
0xFA, 0x87, 0x13, 0xFA, 0x94, 0x06, 0x48, 0x3F,
0xF2, 0x5E, 0xD0, 0xBA, 0x8B, 0xAE, 0x30, 0x5B,
0x84, 0x8A, 0x54, 0x00, 0xDF, 0xBC, 0x23, 0x9D,
0x19, 0x6D, 0x5B, 0xC1, 0x3D, 0xB1, 0x59, 0x0E,
0xF3, 0x80, 0xAE, 0x5D, 0xA2, 0xD2, 0x82, 0xD5,
0x63, 0xA0, 0x01, 0x84, 0x83, 0x07, 0x2E, 0x14,
0xD9, 0xB5, 0x51, 0x90, 0x9B, 0x2C, 0x7C, 0xA3,
0xA6, 0xB2, 0xEB, 0x73, 0xA5, 0x4C, 0xBE, 0x54,
0x16, 0x92, 0x0C, 0x74, 0xE3, 0x36, 0x61, 0x51,
0xC0, 0x38, 0x8C, 0xB0, 0x3A, 0xBD, 0xF5, 0x5A,
0x73, 0xFC, 0x2C, 0x60, 0x25, 0x62, 0x0B, 0x96,
0xBB, 0x6C, 0x4E, 0x42, 0x89, 0xF7, 0x6B, 0x10,
0x53, 0x7C, 0x6A, 0x28, 0xB4, 0x27, 0xF1, 0x8C,
0xE1, 0x13, 0xE6, 0x95, 0xBD, 0x9C, 0x45, 0xC7,
0xE2, 0x24, 0xF4, 0x46, 0xB6, 0x3B, 0x66, 0x70,
0xCC, 0xCA, 0x95, 0xE3, 0x03, 0x85, 0x56, 0xCB,
0xD4, 0x11, 0x1C, 0xD0, 0x1E, 0x93, 0xD7, 0xB8,
0xFB, 0xA6, 0xC3, 0x83, 0x8E, 0x20, 0xB5, 0xFF,
0xE9, 0x9F, 0xCF, 0x77, 0xBF, 0xC3, 0xBA, 0xCC,
0xEA, 0x03, 0x77, 0x6F, 0x39, 0x08, 0xAF, 0xBF,
0x33, 0x40, 0xC9, 0xE7, 0x62, 0x2B, 0x71, 0xE2,
0x81, 0x79, 0x79, 0x0C, 0x09, 0xAA, 0xAD, 0x82,
0x24, 0x41, 0xCD, 0x3A, 0xF9, 0xEA, 0xD8, 0xB9,
0xE5, 0xE4, 0xC5, 0x9A, 0xB9, 0xA4, 0x4D, 0x97,
0x44, 0x7E, 0x08, 0xDA, 0x86, 0x7A, 0xE7, 0x17,
0xA1, 0x66, 0x1D, 0x94, 0xAA, 0xA1, 0xED, 0x1D,
0x06, 0x3D, 0x70, 0xF0, 0xB2, 0xDE, 0xD2, 0xB3,
0x41, 0x0B, 0x7B, 0x72, 0xA0, 0xA7, 0x11, 0x1C,
0x31, 0xEF, 0xC2, 0xD1, 0x27, 0x53, 0x90, 0x3E,
0x20, 0x8F, 0xF6, 0x33, 0x60, 0x26, 0xFF, 0x5F,
0x96, 0xEC, 0x5C, 0x76, 0xB1, 0x2A, 0xAB, 0x49,
0x9E, 0x81, 0x9C, 0x88, 0x52, 0xEE, 0x1B, 0x21,
0x5F, 0xC4, 0x93, 0x1A, 0x0A, 0xEB, 0xEF, 0xD9,
0x91, 0xC5, 0x85, 0x39, 0x49, 0x99, 0xEE, 0xCD,
0x2D, 0xAD, 0x4F, 0x31, 0x8F, 0x8B, 0x3B, 0x01,
0x47, 0x18, 0x87, 0x23, 0x6D, 0xDD, 0x46, 0x1F,
0xD6, 0x4E, 0x3E, 0x2D, 0x69, 0xF9, 0x64, 0x48,
0x2A, 0x4F, 0xCE, 0xF2, 0xCB, 0x65, 0x2F, 0x8E,
0xFC, 0x78, 0x97, 0x5C, 0x05, 0x58, 0x7A, 0x19,
0xAC, 0x8D, 0x7F, 0xE5, 0xD5, 0x98, 0x1A, 0x57,
0x4B, 0x67, 0x0E, 0x7F, 0xA7, 0x05, 0x5A, 0x64,
0x28, 0xAF, 0x14, 0x63, 0x3F, 0xB6, 0x29, 0xFE,
0x88, 0xF5, 0x3C, 0xB7, 0x4C, 0x3C, 0x02, 0xA5,
0xB8, 0xCE, 0xDA, 0xE9, 0xB0, 0x68, 0x17, 0x44,
0x55, 0xE0, 0x1F, 0x4D, 0x8A, 0x43, 0x7D, 0x69,
0x57, 0x29, 0xC7, 0x2E, 0x8D, 0xAC, 0x74, 0x15,
0xB7, 0x59, 0xC4, 0xA8, 0x9F, 0x0A, 0x72, 0x9E,
0x7E, 0x6E, 0x15, 0x47, 0x22, 0xDF, 0x12, 0x34,
0x58, 0x35, 0x07, 0x6A, 0x99, 0xCF, 0x34, 0xDC,
0x6E, 0x22, 0x50, 0xC9, 0xDE, 0xC0, 0x68, 0x9B,
0x65, 0x89, 0xBC, 0xD4, 0xDB, 0xED, 0xF8, 0xAB,
0xC8, 0x12, 0xA8, 0xA2, 0x2B, 0x0D, 0x40, 0x52,
0xDC, 0xBB, 0xFE, 0x02, 0x32, 0x2F, 0xA4, 0xA9,
0xCA, 0xD7, 0x10, 0x61, 0x21, 0x1E, 0xF0, 0xB4,
0xD3, 0x50, 0x5D, 0x04, 0x0F, 0xF6, 0x00, 0xC2,
0x6F, 0x16, 0x9D, 0x25, 0x36, 0x86, 0x42, 0x56,
0x4A, 0x55, 0x5E, 0x09, 0xC1, 0xBE, 0xE0, 0x91
};
/* Macro to perform one column of the RS matrix multiplication. The
* parameters a, b, c, and d are the four bytes of output; i is the index
 * of the key byte, and w, x, y, and z are the column of constants from
* the RS matrix, preprocessed through the poly_to_exp table. */
#define CALC_S(a, b, c, d, i, w, x, y, z) \
if (key[i]) { \
tmp = poly_to_exp[key[i] - 1]; \
(a) ^= exp_to_poly[tmp + (w)]; \
(b) ^= exp_to_poly[tmp + (x)]; \
(c) ^= exp_to_poly[tmp + (y)]; \
(d) ^= exp_to_poly[tmp + (z)]; \
}
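/* Illustrative sketch only: with gf256_mul() from above, one CALC_S
 * invocation is a plain GF(2^8) multiply-accumulate of key byte i against
 * one column of the RS matrix, with the constants in polynomial form
 * rather than preprocessed through poly_to_exp: */
static inline void calc_s_sketch(u8 *a, u8 *b, u8 *c, u8 *d, u8 key_byte,
				 u8 w, u8 x, u8 y, u8 z)
{
	*a ^= gf256_mul(key_byte, w);
	*b ^= gf256_mul(key_byte, x);
	*c ^= gf256_mul(key_byte, y);
	*d ^= gf256_mul(key_byte, z);
}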
/* Macros to calculate the key-dependent S-boxes for a 128-bit key using
* the S vector from CALC_S. CALC_SB_2 computes a single entry in all
* four S-boxes, where i is the index of the entry to compute, and a and b
* are the index numbers preprocessed through the q0 and q1 tables
* respectively. */
#define CALC_SB_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[(a) ^ sa] ^ se]; \
ctx->s[1][i] = mds[1][q0[(b) ^ sb] ^ sf]; \
ctx->s[2][i] = mds[2][q1[(a) ^ sc] ^ sg]; \
ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh]
/* Macro exactly like CALC_SB_2, but for 192-bit keys. */
#define CALC_SB192_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[q0[(b) ^ sa] ^ se] ^ si]; \
ctx->s[1][i] = mds[1][q0[q1[(b) ^ sb] ^ sf] ^ sj]; \
ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \
ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl];
/* Macro exactly like CALC_SB_2, but for 256-bit keys. */
#define CALC_SB256_2(i, a, b) \
ctx->s[0][i] = mds[0][q0[q0[q1[(b) ^ sa] ^ se] ^ si] ^ sm]; \
ctx->s[1][i] = mds[1][q0[q1[q1[(a) ^ sb] ^ sf] ^ sj] ^ sn]; \
ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \
ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp];
/* Macros to calculate the whitening and round subkeys. CALC_K_2 computes the
* last two stages of the h() function for a given index (either 2i or 2i+1).
* a, b, c, and d are the four bytes going into the last two stages. For
* 128-bit keys, this is the entire h() function and a and c are the index
* preprocessed through q0 and q1 respectively; for longer keys they are the
* output of previous stages. j is the index of the first key byte to use.
* CALC_K computes a pair of subkeys for 128-bit Twofish, by calling CALC_K_2
* twice, doing the Pseudo-Hadamard Transform, and doing the necessary
* rotations. Its parameters are: a, the array to write the results into,
* j, the index of the first output entry, k and l, the preprocessed indices
* for index 2i, and m and n, the preprocessed indices for index 2i+1.
* CALC_K192_2 expands CALC_K_2 to handle 192-bit keys, by doing an
* additional lookup-and-XOR stage. The parameters a, b, c and d are the
* four bytes going into the last three stages. For 192-bit keys, c = d
* are the index preprocessed through q0, and a = b are the index
* preprocessed through q1; j is the index of the first key byte to use.
* CALC_K192 is identical to CALC_K but for using the CALC_K192_2 macro
* instead of CALC_K_2.
* CALC_K256_2 expands CALC_K192_2 to handle 256-bit keys, by doing an
* additional lookup-and-XOR stage. The parameters a and b are the index
* preprocessed through q0 and q1 respectively; j is the index of the first
* key byte to use. CALC_K256 is identical to CALC_K but for using the
* CALC_K256_2 macro instead of CALC_K_2. */
#define CALC_K_2(a, b, c, d, j) \
mds[0][q0[a ^ key[(j) + 8]] ^ key[j]] \
^ mds[1][q0[b ^ key[(j) + 9]] ^ key[(j) + 1]] \
^ mds[2][q1[c ^ key[(j) + 10]] ^ key[(j) + 2]] \
^ mds[3][q1[d ^ key[(j) + 11]] ^ key[(j) + 3]]
#define CALC_K(a, j, k, l, m, n) \
x = CALC_K_2 (k, l, k, l, 0); \
y = CALC_K_2 (m, n, m, n, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
#define CALC_K192_2(a, b, c, d, j) \
CALC_K_2 (q0[a ^ key[(j) + 16]], \
q1[b ^ key[(j) + 17]], \
q0[c ^ key[(j) + 18]], \
q1[d ^ key[(j) + 19]], j)
#define CALC_K192(a, j, k, l, m, n) \
x = CALC_K192_2 (l, l, k, k, 0); \
y = CALC_K192_2 (n, n, m, m, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
#define CALC_K256_2(a, b, j) \
CALC_K192_2 (q1[b ^ key[(j) + 24]], \
q1[a ^ key[(j) + 25]], \
q0[a ^ key[(j) + 26]], \
q0[b ^ key[(j) + 27]], j)
#define CALC_K256(a, j, k, l, m, n) \
x = CALC_K256_2 (k, l, 0); \
y = CALC_K256_2 (m, n, 4); \
y = rol32(y, 8); \
x += y; y += x; ctx->a[j] = x; \
ctx->a[(j) + 1] = rol32(y, 9)
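/* A hedged sketch of the tail every CALC_K* variant shares: given
 * A = h(2i) and B = h(2i+1), the pseudo-Hadamard transform and the
 * rotations produce one pair of subkeys, exactly as in the Twofish
 * paper's key schedule: */
static inline void twofish_pht_sketch(u32 a, u32 b, u32 *k_even, u32 *k_odd)
{
	b = rol32(b, 8);	/* pre-rotate the odd-index h() output */
	a += b;			/* PHT: a' = a + b  */
	b += a;			/* PHT: b' = a + 2b */
	*k_even = a;
	*k_odd = rol32(b, 9);	/* odd-index subkeys are rotated left by 9 */
}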
/* Perform the key setup. */
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
{
struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
int i, j, k;
/* Temporaries for CALC_K. */
u32 x, y;
/* The S vector used to key the S-boxes, split up into individual bytes.
* 128-bit keys use only sa through sh; 256-bit use all of them. */
u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0;
u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0;
/* Temporary for CALC_S. */
u8 tmp;
/* Check key length. */
if (key_len % 8)
{
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL; /* unsupported key length */
}
/* Compute the first two words of the S vector. The magic numbers are
* the entries of the RS matrix, preprocessed through poly_to_exp. The
* numbers in the comments are the original (polynomial form) matrix
* entries. */
CALC_S (sa, sb, sc, sd, 0, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (sa, sb, sc, sd, 1, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (sa, sb, sc, sd, 2, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (sa, sb, sc, sd, 3, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (sa, sb, sc, sd, 4, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (sa, sb, sc, sd, 5, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (sa, sb, sc, sd, 6, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (sa, sb, sc, sd, 7, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
if (key_len == 24 || key_len == 32) { /* 192- or 256-bit key */
/* Calculate the third word of the S vector */
CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
}
if (key_len == 32) { /* 256-bit key */
/* Calculate the fourth word of the S vector */
CALC_S (sm, sn, so, sp, 24, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
CALC_S (sm, sn, so, sp, 25, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
CALC_S (sm, sn, so, sp, 26, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
CALC_S (sm, sn, so, sp, 27, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
CALC_S (sm, sn, so, sp, 28, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
CALC_S (sm, sn, so, sp, 29, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
CALC_S (sm, sn, so, sp, 30, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
CALC_S (sm, sn, so, sp, 31, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB256_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* Calculate whitening and round subkeys. The constants are
* indices of subkeys, preprocessed through q0 and q1. */
CALC_K256 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
CALC_K256 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
CALC_K256 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
CALC_K256 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
CALC_K256 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
CALC_K256 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
CALC_K256 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
CALC_K256 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
CALC_K256 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
CALC_K256 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
CALC_K256 (k, 12, 0x18, 0x37, 0xF7, 0x71);
CALC_K256 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
CALC_K256 (k, 16, 0x43, 0x30, 0x75, 0x0F);
CALC_K256 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
CALC_K256 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
CALC_K256 (k, 22, 0x94, 0x06, 0x48, 0x3F);
CALC_K256 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
CALC_K256 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
CALC_K256 (k, 28, 0x84, 0x8A, 0x54, 0x00);
CALC_K256 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
} else if (key_len == 24) { /* 192-bit key */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB192_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* Calculate whitening and round subkeys. The constants are
* indices of subkeys, preprocessed through q0 and q1. */
CALC_K192 (w, 0, 0xA9, 0x75, 0x67, 0xF3);
CALC_K192 (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
CALC_K192 (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
CALC_K192 (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
CALC_K192 (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
CALC_K192 (k, 2, 0x80, 0xE6, 0x78, 0x6B);
CALC_K192 (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
CALC_K192 (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
CALC_K192 (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
CALC_K192 (k, 10, 0x35, 0xD8, 0x98, 0xFD);
CALC_K192 (k, 12, 0x18, 0x37, 0xF7, 0x71);
CALC_K192 (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
CALC_K192 (k, 16, 0x43, 0x30, 0x75, 0x0F);
CALC_K192 (k, 18, 0x37, 0xF8, 0x26, 0x1B);
CALC_K192 (k, 20, 0xFA, 0x87, 0x13, 0xFA);
CALC_K192 (k, 22, 0x94, 0x06, 0x48, 0x3F);
CALC_K192 (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
CALC_K192 (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
CALC_K192 (k, 28, 0x84, 0x8A, 0x54, 0x00);
CALC_K192 (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
} else { /* 128-bit key */
/* Compute the S-boxes. */
for ( i = j = 0, k = 1; i < 256; i++, j += 2, k += 2 ) {
CALC_SB_2( i, calc_sb_tbl[j], calc_sb_tbl[k] );
}
/* Calculate whitening and round subkeys. The constants are
* indices of subkeys, preprocessed through q0 and q1. */
CALC_K (w, 0, 0xA9, 0x75, 0x67, 0xF3);
CALC_K (w, 2, 0xB3, 0xC6, 0xE8, 0xF4);
CALC_K (w, 4, 0x04, 0xDB, 0xFD, 0x7B);
CALC_K (w, 6, 0xA3, 0xFB, 0x76, 0xC8);
CALC_K (k, 0, 0x9A, 0x4A, 0x92, 0xD3);
CALC_K (k, 2, 0x80, 0xE6, 0x78, 0x6B);
CALC_K (k, 4, 0xE4, 0x45, 0xDD, 0x7D);
CALC_K (k, 6, 0xD1, 0xE8, 0x38, 0x4B);
CALC_K (k, 8, 0x0D, 0xD6, 0xC6, 0x32);
CALC_K (k, 10, 0x35, 0xD8, 0x98, 0xFD);
CALC_K (k, 12, 0x18, 0x37, 0xF7, 0x71);
CALC_K (k, 14, 0xEC, 0xF1, 0x6C, 0xE1);
CALC_K (k, 16, 0x43, 0x30, 0x75, 0x0F);
CALC_K (k, 18, 0x37, 0xF8, 0x26, 0x1B);
CALC_K (k, 20, 0xFA, 0x87, 0x13, 0xFA);
CALC_K (k, 22, 0x94, 0x06, 0x48, 0x3F);
CALC_K (k, 24, 0xF2, 0x5E, 0xD0, 0xBA);
CALC_K (k, 26, 0x8B, 0xAE, 0x30, 0x5B);
CALC_K (k, 28, 0x84, 0x8A, 0x54, 0x00);
CALC_K (k, 30, 0xDF, 0xBC, 0x23, 0x9D);
}
return 0;
}
EXPORT_SYMBOL_GPL(twofish_setkey);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Twofish cipher common functions");
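/* Usage sketch (illustrative, not part of this file): kernel users reach
 * this key schedule through the generic "twofish" cipher registered by
 * twofish.c, via the crypto_cipher interface introduced in this series.
 * The helper name is made up and error handling is trimmed: */
static int twofish_demo(const u8 *key, unsigned int keylen,
			const u8 in[16], u8 out[16])
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("twofish", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);
	crypto_free_cipher(tfm);
	return err;
}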

View File

@ -40,11 +40,13 @@ static int
cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
{
int err = -EINVAL;
int cipher_len;
int mode_len;
char cms[LO_NAME_SIZE]; /* cipher-mode string */
char *cipher;
char *mode;
char *cmsp = cms; /* c-m string pointer */
struct crypto_tfm *tfm = NULL;
struct crypto_blkcipher *tfm;
/* encryption breaks for non-sector-aligned offsets */
@ -53,20 +55,39 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
cms[LO_NAME_SIZE - 1] = 0;
cipher = strsep(&cmsp, "-");
mode = strsep(&cmsp, "-");
if (mode == NULL || strcmp(mode, "cbc") == 0)
tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC |
CRYPTO_TFM_REQ_MAY_SLEEP);
else if (strcmp(mode, "ecb") == 0)
tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB |
CRYPTO_TFM_REQ_MAY_SLEEP);
if (tfm == NULL)
cipher = cmsp;
cipher_len = strcspn(cmsp, "-");
mode = cmsp + cipher_len;
mode_len = 0;
if (*mode) {
mode++;
mode_len = strcspn(mode, "-");
}
if (!mode_len) {
mode = "cbc";
mode_len = 3;
}
if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
return -EINVAL;
err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
info->lo_encrypt_key_size);
memmove(cms, mode, mode_len);
cmsp = cms + mode_len;
*cmsp++ = '(';
memcpy(cmsp, info->lo_crypt_name, cipher_len);
cmsp += cipher_len;
*cmsp++ = ')';
*cmsp = 0;
tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
info->lo_encrypt_key_size);
if (err != 0)
goto out_free_tfm;
@ -75,99 +96,49 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
return 0;
out_free_tfm:
crypto_free_tfm(tfm);
crypto_free_blkcipher(tfm);
out:
return err;
}
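/* A hypothetical standalone helper restating the string shuffle above: a
 * cryptoloop "cipher-mode" name becomes the crypto API's "mode(cipher)"
 * spelling, defaulting to cbc and trimming any trailing "-extra"
 * component, just as the strcspn() logic in cryptoloop_init() does: */
static int cms_to_api_name_sketch(char *dst, size_t dstlen, const char *src)
{
	size_t cipher_len = strcspn(src, "-");
	const char *mode = src[cipher_len] ? src + cipher_len + 1 : "";
	size_t mode_len = strcspn(mode, "-");

	if (!mode_len) {		/* no mode given: default to cbc */
		mode = "cbc";
		mode_len = 3;
	}
	if (snprintf(dst, dstlen, "%.*s(%.*s)", (int)mode_len, mode,
		     (int)cipher_len, src) >= (int)dstlen)
		return -EINVAL;	/* result would not fit */
	return 0;
}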
typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm,
typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
struct scatterlist *sg_out,
struct scatterlist *sg_in,
unsigned int nsg);
static int
cryptoloop_transfer_ecb(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
struct scatterlist sg_out = { NULL, };
struct scatterlist sg_in = { NULL, };
encdec_ecb_t encdecfunc;
struct page *in_page, *out_page;
unsigned in_offs, out_offs;
if (cmd == READ) {
in_page = raw_page;
in_offs = raw_off;
out_page = loop_page;
out_offs = loop_off;
encdecfunc = tfm->crt_u.cipher.cit_decrypt;
} else {
in_page = loop_page;
in_offs = loop_off;
out_page = raw_page;
out_offs = raw_off;
encdecfunc = tfm->crt_u.cipher.cit_encrypt;
}
while (size > 0) {
const int sz = min(size, LOOP_IV_SECTOR_SIZE);
sg_in.page = in_page;
sg_in.offset = in_offs;
sg_in.length = sz;
sg_out.page = out_page;
sg_out.offset = out_offs;
sg_out.length = sz;
encdecfunc(tfm, &sg_out, &sg_in, sz);
size -= sz;
in_offs += sz;
out_offs += sz;
}
return 0;
}
typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm,
struct scatterlist *sg_out,
struct scatterlist *sg_in,
unsigned int nsg, u8 *iv);
static int
cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
struct crypto_blkcipher *tfm = lo->key_data;
struct blkcipher_desc desc = {
.tfm = tfm,
.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
};
struct scatterlist sg_out = { NULL, };
struct scatterlist sg_in = { NULL, };
encdec_cbc_t encdecfunc;
struct page *in_page, *out_page;
unsigned in_offs, out_offs;
int err;
if (cmd == READ) {
in_page = raw_page;
in_offs = raw_off;
out_page = loop_page;
out_offs = loop_off;
encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
} else {
in_page = loop_page;
in_offs = loop_off;
out_page = raw_page;
out_offs = raw_off;
encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
}
while (size > 0) {
@ -183,7 +154,10 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
sg_out.offset = out_offs;
sg_out.length = sz;
encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
desc.info = iv;
err = encdecfunc(&desc, &sg_out, &sg_in, sz);
if (err)
return err;
IV++;
size -= sz;
@ -194,32 +168,6 @@ cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
return 0;
}
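/* Sketch of the IV convention used in the loop above (assumed from the
 * surrounding code, since the iv declaration falls outside this hunk):
 * the CBC IV is the sector number in little-endian form, zero-padded to
 * a 16-byte buffer, and the "IV++" in the loop advances it per sector: */
static void cryptoloop_iv_sketch(u32 iv[4], sector_t sector)
{
	memset(iv, 0, 4 * sizeof(u32));
	iv[0] = cpu_to_le32(sector & 0xffffffff);
}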
static int
cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *raw_page, unsigned raw_off,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB)
{
lo->transfer = cryptoloop_transfer_ecb;
return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off,
loop_page, loop_off, size, IV);
}
if(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC)
{
lo->transfer = cryptoloop_transfer_cbc;
return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off,
loop_page, loop_off, size, IV);
}
/* This is not supposed to happen */
printk( KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n");
return -EINVAL;
}
static int
cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
{
@ -229,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
static int
cryptoloop_release(struct loop_device *lo)
{
struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
struct crypto_blkcipher *tfm = lo->key_data;
if (tfm != NULL) {
crypto_free_tfm(tfm);
crypto_free_blkcipher(tfm);
lo->key_data = NULL;
return 0;
}

View File

@ -2,22 +2,53 @@ menu "Hardware crypto devices"
config CRYPTO_DEV_PADLOCK
tristate "Support for VIA PadLock ACE"
depends on CRYPTO && X86_32
depends on X86_32
select CRYPTO_ALGAPI
default m
help
Some VIA processors come with an integrated crypto engine
(the so-called VIA PadLock ACE, Advanced Cryptography Engine)
that provides instructions for very fast {en,de}cryption
with some algorithms.
that provides instructions for very fast cryptographic
operations with supported algorithms.
The instructions are used only when the CPU supports them.
Otherwise software encryption is used. If you are unsure,
say Y.
Otherwise software encryption is used.
Selecting M for this option will compile a helper module
padlock.ko that should autoload all of the algorithms
configured below. Don't worry if your hardware does not
support some or all of them. In that case padlock.ko will
simply write a single line into the kernel log noting the
failure, but everything will keep working fine.
If you are unsure, say M. The compiled module will be
called padlock.ko
config CRYPTO_DEV_PADLOCK_AES
bool "Support for AES in VIA PadLock"
tristate "PadLock driver for AES algorithm"
depends on CRYPTO_DEV_PADLOCK
default y
select CRYPTO_BLKCIPHER
default m
help
Use VIA PadLock for the AES algorithm.
Available in VIA C3 and newer CPUs.
If unsure say M. The compiled module will be
called padlock-aes.ko
config CRYPTO_DEV_PADLOCK_SHA
tristate "PadLock driver for SHA1 and SHA256 algorithms"
depends on CRYPTO_DEV_PADLOCK
select CRYPTO_SHA1
select CRYPTO_SHA256
default m
help
Use VIA PadLock for the SHA1/SHA256 algorithms.
Available in VIA C7 and newer processors.
If unsure say M. The compiled module will be
called padlock-sha.ko
endmenu

View File

@ -1,7 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PADLOCK) += padlock.o
padlock-objs-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
padlock-objs := padlock-generic.o $(padlock-objs-y)
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o

View File

@ -43,11 +43,11 @@
* ---------------------------------------------------------------------------
*/
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
@ -59,6 +59,17 @@
#define AES_EXTENDED_KEY_SIZE 64 /* in uint32_t units */
#define AES_EXTENDED_KEY_SIZE_B (AES_EXTENDED_KEY_SIZE * sizeof(uint32_t))
/* Control word. */
struct cword {
unsigned int __attribute__ ((__packed__))
rounds:4,
algo:3,
keygen:1,
interm:1,
encdec:1,
ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
/* Whenever making any changes to the following
* structure *make sure* you keep E, d_data
* and cword aligned on 16 Bytes boundaries!!! */
@ -286,9 +297,9 @@ aes_hw_extkey_available(uint8_t key_len)
return 0;
}
static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
unsigned long addr = (unsigned long)ctx;
unsigned long align = PADLOCK_ALIGNMENT;
if (align <= crypto_tfm_ctx_alignment())
@ -296,16 +307,27 @@ static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
return (struct aes_ctx *)ALIGN(addr, align);
}
static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
return aes_ctx_common(crypto_tfm_ctx(tfm));
}
static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len, u32 *flags)
unsigned int key_len)
{
struct aes_ctx *ctx = aes_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
u32 *flags = &tfm->crt_flags;
uint32_t i, t, u, v, w;
uint32_t P[AES_EXTENDED_KEY_SIZE];
uint32_t rounds;
if (key_len != 16 && key_len != 24 && key_len != 32) {
if (key_len % 8) {
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
@ -430,50 +452,10 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt, 1);
}
static unsigned int aes_encrypt_ecb(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct aes_ctx *ctx = aes_ctx(desc->tfm);
padlock_xcrypt_ecb(in, out, ctx->E, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
return nbytes & ~(AES_BLOCK_SIZE - 1);
}
static unsigned int aes_decrypt_ecb(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct aes_ctx *ctx = aes_ctx(desc->tfm);
padlock_xcrypt_ecb(in, out, ctx->D, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
return nbytes & ~(AES_BLOCK_SIZE - 1);
}
static unsigned int aes_encrypt_cbc(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct aes_ctx *ctx = aes_ctx(desc->tfm);
u8 *iv;
iv = padlock_xcrypt_cbc(in, out, ctx->E, desc->info,
&ctx->cword.encrypt, nbytes / AES_BLOCK_SIZE);
memcpy(desc->info, iv, AES_BLOCK_SIZE);
return nbytes & ~(AES_BLOCK_SIZE - 1);
}
static unsigned int aes_decrypt_cbc(const struct cipher_desc *desc, u8 *out,
const u8 *in, unsigned int nbytes)
{
struct aes_ctx *ctx = aes_ctx(desc->tfm);
padlock_xcrypt_cbc(in, out, ctx->D, desc->info, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
return nbytes & ~(AES_BLOCK_SIZE - 1);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-padlock",
.cra_priority = 300,
.cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
@ -487,23 +469,195 @@ static struct crypto_alg aes_alg = {
.cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt,
.cia_encrypt_ecb = aes_encrypt_ecb,
.cia_decrypt_ecb = aes_decrypt_ecb,
.cia_encrypt_cbc = aes_encrypt_cbc,
.cia_decrypt_cbc = aes_decrypt_cbc,
}
}
};
int __init padlock_init_aes(void)
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static struct crypto_alg ecb_aes_alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ecb-aes-padlock",
.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.setkey = aes_set_key,
.encrypt = ecb_aes_encrypt,
.decrypt = ecb_aes_decrypt,
}
}
};
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
walk.iv, &ctx->cword.encrypt,
nbytes / AES_BLOCK_SIZE);
memcpy(walk.iv, iv, AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
nbytes / AES_BLOCK_SIZE);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
return err;
}
static struct crypto_alg cbc_aes_alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-padlock",
.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_alignmask = PADLOCK_ALIGNMENT - 1,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
.cra_u = {
.blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aes_set_key,
.encrypt = cbc_aes_encrypt,
.decrypt = cbc_aes_decrypt,
}
}
};
static int __init padlock_init(void)
{
int ret;
if (!cpu_has_xcrypt) {
printk(KERN_ERR PFX "VIA PadLock not detected.\n");
return -ENODEV;
}
if (!cpu_has_xcrypt_enabled) {
printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}
gen_tabs();
return crypto_register_alg(&aes_alg);
if ((ret = crypto_register_alg(&aes_alg)))
goto aes_err;
if ((ret = crypto_register_alg(&ecb_aes_alg)))
goto ecb_aes_err;
if ((ret = crypto_register_alg(&cbc_aes_alg)))
goto cbc_aes_err;
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
out:
return ret;
cbc_aes_err:
crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
crypto_unregister_alg(&aes_alg);
aes_err:
printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
goto out;
}
void __exit padlock_fini_aes(void)
static void __exit padlock_fini(void)
{
crypto_unregister_alg(&cbc_aes_alg);
crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
}
module_init(padlock_init);
module_exit(padlock_fini);
MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
MODULE_ALIAS("aes-padlock");
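/* Usage sketch (illustrative, not part of the driver): consumers never
 * name this driver directly; they ask for "cbc(aes)", and the priority
 * values above make the PadLock implementation win over aes-generic
 * whenever this module is loaded: */
static struct crypto_blkcipher *demo_alloc_cbc_aes(void)
{
	return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}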

View File

@ -1,63 +0,0 @@
/*
* Cryptographic API.
*
* Support for VIA PadLock hardware crypto engine.
*
* Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <asm/byteorder.h>
#include "padlock.h"
static int __init
padlock_init(void)
{
int ret = -ENOSYS;
if (!cpu_has_xcrypt) {
printk(KERN_ERR PFX "VIA PadLock not detected.\n");
return -ENODEV;
}
if (!cpu_has_xcrypt_enabled) {
printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}
#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
if ((ret = padlock_init_aes())) {
printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
return ret;
}
#endif
if (ret == -ENOSYS)
printk(KERN_ERR PFX "Hmm, VIA PadLock was compiled without any algorithm.\n");
return ret;
}
static void __exit
padlock_fini(void)
{
#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
padlock_fini_aes();
#endif
}
module_init(padlock_init);
module_exit(padlock_fini);
MODULE_DESCRIPTION("VIA PadLock crypto engine support.");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Michal Ludvig");

View File

@ -0,0 +1,318 @@
/*
* Cryptographic API.
*
* Support for VIA PadLock hardware crypto engine.
*
* Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "padlock.h"
#define SHA1_DEFAULT_FALLBACK "sha1-generic"
#define SHA1_DIGEST_SIZE 20
#define SHA1_HMAC_BLOCK_SIZE 64
#define SHA256_DEFAULT_FALLBACK "sha256-generic"
#define SHA256_DIGEST_SIZE 32
#define SHA256_HMAC_BLOCK_SIZE 64
struct padlock_sha_ctx {
char *data;
size_t used;
int bypass;
void (*f_sha_padlock)(const char *in, char *out, int count);
struct hash_desc fallback;
};
static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
return crypto_tfm_ctx(tfm);
}
/* We'll need aligned address on the stack */
#define NEAREST_ALIGNED(ptr) \
((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))
static struct crypto_alg sha1_alg, sha256_alg;
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
if (ctx(tfm)->bypass)
return;
crypto_hash_init(&ctx(tfm)->fallback);
if (ctx(tfm)->data && ctx(tfm)->used) {
struct scatterlist sg;
sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
}
ctx(tfm)->used = 0;
ctx(tfm)->bypass = 1;
}
static void padlock_sha_init(struct crypto_tfm *tfm)
{
ctx(tfm)->used = 0;
ctx(tfm)->bypass = 0;
}
static void padlock_sha_update(struct crypto_tfm *tfm,
const uint8_t *data, unsigned int length)
{
/* Our buffer is always one page. */
if (unlikely(!ctx(tfm)->bypass &&
(ctx(tfm)->used + length > PAGE_SIZE)))
padlock_sha_bypass(tfm);
if (unlikely(ctx(tfm)->bypass)) {
struct scatterlist sg;
sg_set_buf(&sg, (uint8_t *)data, length);
crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
return;
}
memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
ctx(tfm)->used += length;
}
static inline void padlock_output_block(uint32_t *src,
uint32_t *dst, size_t count)
{
while (count--)
*dst++ = swab32(*src++);
}
static void padlock_do_sha1(const char *in, char *out, int count)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
char buf[128+16];
char *result = NEAREST_ALIGNED(buf);
((uint32_t *)result)[0] = 0x67452301;
((uint32_t *)result)[1] = 0xEFCDAB89;
((uint32_t *)result)[2] = 0x98BADCFE;
((uint32_t *)result)[3] = 0x10325476;
((uint32_t *)result)[4] = 0xC3D2E1F0;
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: "+S"(in), "+D"(result)
: "c"(count), "a"(0));
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}
static void padlock_do_sha256(const char *in, char *out, int count)
{
/* We can't store directly to *out as it may be unaligned. */
/* BTW Don't reduce the buffer size below 128 Bytes!
* PadLock microcode needs it that big. */
char buf[128+16];
char *result = NEAREST_ALIGNED(buf);
((uint32_t *)result)[0] = 0x6A09E667;
((uint32_t *)result)[1] = 0xBB67AE85;
((uint32_t *)result)[2] = 0x3C6EF372;
((uint32_t *)result)[3] = 0xA54FF53A;
((uint32_t *)result)[4] = 0x510E527F;
((uint32_t *)result)[5] = 0x9B05688C;
((uint32_t *)result)[6] = 0x1F83D9AB;
((uint32_t *)result)[7] = 0x5BE0CD19;
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: "+S"(in), "+D"(result)
: "c"(count), "a"(0));
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}
static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
if (unlikely(ctx(tfm)->bypass)) {
crypto_hash_final(&ctx(tfm)->fallback, out);
ctx(tfm)->bypass = 0;
return;
}
/* Pass the input buffer to PadLock microcode... */
ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);
ctx(tfm)->used = 0;
}
static int padlock_cra_init(struct crypto_tfm *tfm)
{
const char *fallback_driver_name = tfm->__crt_alg->cra_name;
struct crypto_hash *fallback_tfm;
/* For now we'll allocate one page. This
 * could be made configurable one day. */
ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
if (!ctx(tfm)->data)
return -ENOMEM;
/* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
free_page((unsigned long)(ctx(tfm)->data));
return PTR_ERR(fallback_tfm);
}
ctx(tfm)->fallback.tfm = fallback_tfm;
return 0;
}
static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
ctx(tfm)->f_sha_padlock = padlock_do_sha1;
return padlock_cra_init(tfm);
}
static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
ctx(tfm)->f_sha_padlock = padlock_do_sha256;
return padlock_cra_init(tfm);
}
static void padlock_cra_exit(struct crypto_tfm *tfm)
{
if (ctx(tfm)->data) {
free_page((unsigned long)(ctx(tfm)->data));
ctx(tfm)->data = NULL;
}
crypto_free_hash(ctx(tfm)->fallback.tfm);
ctx(tfm)->fallback.tfm = NULL;
}
static struct crypto_alg sha1_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha1_alg.cra_list),
.cra_init = padlock_sha1_cra_init,
.cra_exit = padlock_cra_exit,
.cra_u = {
.digest = {
.dia_digestsize = SHA1_DIGEST_SIZE,
.dia_init = padlock_sha_init,
.dia_update = padlock_sha_update,
.dia_final = padlock_sha_final,
}
}
};
static struct crypto_alg sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-padlock",
.cra_priority = PADLOCK_CRA_PRIORITY,
.cra_flags = CRYPTO_ALG_TYPE_DIGEST |
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct padlock_sha_ctx),
.cra_module = THIS_MODULE,
.cra_list = LIST_HEAD_INIT(sha256_alg.cra_list),
.cra_init = padlock_sha256_cra_init,
.cra_exit = padlock_cra_exit,
.cra_u = {
.digest = {
.dia_digestsize = SHA256_DIGEST_SIZE,
.dia_init = padlock_sha_init,
.dia_update = padlock_sha_update,
.dia_final = padlock_sha_final,
}
}
};
static void __init padlock_sha_check_fallbacks(void)
{
if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK))
printk(KERN_WARNING PFX
"Couldn't load fallback module for sha1.\n");
if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK))
printk(KERN_WARNING PFX
"Couldn't load fallback module for sha256.\n");
}
static int __init padlock_init(void)
{
int rc = -ENODEV;
if (!cpu_has_phe) {
printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
return -ENODEV;
}
if (!cpu_has_phe_enabled) {
printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}
padlock_sha_check_fallbacks();
rc = crypto_register_alg(&sha1_alg);
if (rc)
goto out;
rc = crypto_register_alg(&sha256_alg);
if (rc)
goto out_unreg1;
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");
return 0;
out_unreg1:
crypto_unregister_alg(&sha1_alg);
out:
printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
return rc;
}
static void __exit padlock_fini(void)
{
crypto_unregister_alg(&sha1_alg);
crypto_unregister_alg(&sha256_alg);
}
module_init(padlock_init);
module_exit(padlock_fini);
MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");

drivers/crypto/padlock.c Normal file
View File

@ -0,0 +1,58 @@
/*
* Cryptographic API.
*
* Support for VIA PadLock hardware crypto engine.
*
* Copyright (c) 2006 Michal Ludvig <michal@logix.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "padlock.h"
static int __init padlock_init(void)
{
int success = 0;
if (crypto_has_cipher("aes-padlock", 0, 0))
success++;
if (crypto_has_hash("sha1-padlock", 0, 0))
success++;
if (crypto_has_hash("sha256-padlock", 0, 0))
success++;
if (!success) {
printk(KERN_WARNING PFX "No VIA PadLock drivers have been loaded.\n");
return -ENODEV;
}
printk(KERN_NOTICE PFX "%d drivers are available.\n", success);
return 0;
}
static void __exit padlock_fini(void)
{
}
module_init(padlock_init);
module_exit(padlock_fini);
MODULE_DESCRIPTION("Load all configured PadLock algorithms.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

View File

@ -15,22 +15,9 @@
#define PADLOCK_ALIGNMENT 16
/* Control word. */
struct cword {
unsigned int __attribute__ ((__packed__))
rounds:4,
algo:3,
keygen:1,
interm:1,
encdec:1,
ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
#define PFX "padlock: "
#ifdef CONFIG_CRYPTO_DEV_PADLOCK_AES
int padlock_init_aes(void);
void padlock_fini_aes(void);
#endif
#define PADLOCK_CRA_PRIORITY 300
#define PADLOCK_COMPOSITE_PRIORITY 400
#endif /* _CRYPTO_PADLOCK_H */

View File

@ -5,6 +5,7 @@
* This file is released under the GPL.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@ -78,11 +79,13 @@ struct crypt_config {
*/
struct crypt_iv_operations *iv_gen_ops;
char *iv_mode;
void *iv_gen_private;
struct crypto_cipher *iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
struct crypto_tfm *tfm;
char cipher[CRYPTO_MAX_ALG_NAME];
char chainmode[CRYPTO_MAX_ALG_NAME];
struct crypto_blkcipher *tfm;
unsigned int key_size;
u8 key[0];
};
@ -96,12 +99,12 @@ static kmem_cache_t *_crypt_io_pool;
/*
* Different IV generation algorithms:
*
* plain: the initial vector is the 32-bit low-endian version of the sector
* plain: the initial vector is the 32-bit little-endian version of the sector
number, padded with zeros if necessary.
*
* ess_iv: "encrypted sector|salt initial vector", the sector number is
* encrypted with the bulk cipher using a salt as key. The salt
* should be derived from the bulk cipher's key via hashing.
* essiv: "encrypted sector|salt initial vector", the sector number is
* encrypted with the bulk cipher using a salt as key. The salt
* should be derived from the bulk cipher's key via hashing.
*
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
@ -118,11 +121,13 @@ static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
struct crypto_tfm *essiv_tfm;
struct crypto_tfm *hash_tfm;
struct crypto_cipher *essiv_tfm;
struct crypto_hash *hash_tfm;
struct hash_desc desc;
struct scatterlist sg;
unsigned int saltsize;
u8 *salt;
int err;
if (opts == NULL) {
ti->error = "Digest algorithm missing for ESSIV mode";
@ -130,76 +135,70 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
}
/* Hash the cipher key with the given hash algorithm */
hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
if (hash_tfm == NULL) {
hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(hash_tfm)) {
ti->error = "Error initializing ESSIV hash";
return -EINVAL;
return PTR_ERR(hash_tfm);
}
if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) {
ti->error = "Expected digest algorithm for ESSIV hash";
crypto_free_tfm(hash_tfm);
return -EINVAL;
}
saltsize = crypto_tfm_alg_digestsize(hash_tfm);
saltsize = crypto_hash_digestsize(hash_tfm);
salt = kmalloc(saltsize, GFP_KERNEL);
if (salt == NULL) {
ti->error = "Error kmallocing salt storage in ESSIV";
crypto_free_tfm(hash_tfm);
crypto_free_hash(hash_tfm);
return -ENOMEM;
}
sg_set_buf(&sg, cc->key, cc->key_size);
crypto_digest_digest(hash_tfm, &sg, 1, salt);
crypto_free_tfm(hash_tfm);
desc.tfm = hash_tfm;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
crypto_free_hash(hash_tfm);
if (err) {
ti->error = "Error calculating hash in ESSIV";
return err;
}
/* Setup the essiv_tfm with the given salt */
essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm),
CRYPTO_TFM_MODE_ECB |
CRYPTO_TFM_REQ_MAY_SLEEP);
if (essiv_tfm == NULL) {
essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(essiv_tfm)) {
ti->error = "Error allocating crypto tfm for ESSIV";
kfree(salt);
return -EINVAL;
return PTR_ERR(essiv_tfm);
}
if (crypto_tfm_alg_blocksize(essiv_tfm)
!= crypto_tfm_alg_ivsize(cc->tfm)) {
if (crypto_cipher_blocksize(essiv_tfm) !=
crypto_blkcipher_ivsize(cc->tfm)) {
ti->error = "Block size of ESSIV cipher does "
"not match IV size of block cipher";
crypto_free_tfm(essiv_tfm);
crypto_free_cipher(essiv_tfm);
kfree(salt);
return -EINVAL;
}
if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) {
err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
if (err) {
ti->error = "Failed to set key for ESSIV cipher";
crypto_free_tfm(essiv_tfm);
crypto_free_cipher(essiv_tfm);
kfree(salt);
return -EINVAL;
return err;
}
kfree(salt);
cc->iv_gen_private = (void *)essiv_tfm;
cc->iv_gen_private = essiv_tfm;
return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private);
crypto_free_cipher(cc->iv_gen_private);
cc->iv_gen_private = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
struct scatterlist sg;
memset(iv, 0, cc->iv_size);
*(u64 *)iv = cpu_to_le64(sector);
sg_set_buf(&sg, iv, cc->iv_size);
crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private,
&sg, &sg, cc->iv_size);
crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
return 0;
}
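/* For contrast, a sketch of the "plain" generator described in the comment
 * further up (the actual function lies outside this hunk): the IV is just
 * the little-endian sector number, zero-padded to the cipher's IV size: */
static int crypt_iv_plain_gen_sketch(struct crypt_config *cc, u8 *iv,
				     sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
	return 0;
}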
@ -220,6 +219,11 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
int write, sector_t sector)
{
u8 iv[cc->iv_size];
struct blkcipher_desc desc = {
.tfm = cc->tfm,
.info = iv,
.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
};
int r;
if (cc->iv_gen_ops) {
@ -228,14 +232,14 @@ crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
return r;
if (write)
r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
else
r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
} else {
if (write)
r = crypto_cipher_encrypt(cc->tfm, out, in, length);
r = crypto_blkcipher_encrypt(&desc, out, in, length);
else
r = crypto_cipher_decrypt(cc->tfm, out, in, length);
r = crypto_blkcipher_decrypt(&desc, out, in, length);
}
return r;
@ -510,13 +514,12 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
struct crypto_tfm *tfm;
struct crypto_blkcipher *tfm;
char *tmp;
char *cipher;
char *chainmode;
char *ivmode;
char *ivopts;
unsigned int crypto_flags;
unsigned int key_size;
unsigned long long tmpll;
@ -556,31 +559,25 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ivmode = "plain";
}
/* Choose crypto_flags according to chainmode */
if (strcmp(chainmode, "cbc") == 0)
crypto_flags = CRYPTO_TFM_MODE_CBC;
else if (strcmp(chainmode, "ecb") == 0)
crypto_flags = CRYPTO_TFM_MODE_ECB;
else {
ti->error = "Unknown chaining mode";
goto bad1;
}
if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) {
if (strcmp(chainmode, "ecb") && !ivmode) {
ti->error = "This chaining mode requires an IV mechanism";
goto bad1;
}
tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
if (!tfm) {
if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
cipher) >= CRYPTO_MAX_ALG_NAME) {
ti->error = "Chain mode + cipher name is too long";
goto bad1;
}
tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
ti->error = "Error allocating crypto tfm";
goto bad1;
}
if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
ti->error = "Expected cipher algorithm";
goto bad2;
}
strcpy(cc->cipher, cipher);
strcpy(cc->chainmode, chainmode);
cc->tfm = tfm;
/*
@ -603,12 +600,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
goto bad2;
if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv)
cc->iv_size = crypto_blkcipher_ivsize(tfm);
if (cc->iv_size)
/* at least a 64 bit sector number should fit in our buffer */
cc->iv_size = max(crypto_tfm_alg_ivsize(tfm),
cc->iv_size = max(cc->iv_size,
(unsigned int)(sizeof(u64) / sizeof(u8)));
else {
cc->iv_size = 0;
if (cc->iv_gen_ops) {
DMWARN("Selected cipher does not support IVs");
if (cc->iv_gen_ops->dtr)
@ -629,7 +626,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad4;
}
if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
ti->error = "Error setting key";
goto bad5;
}
@ -675,7 +672,7 @@ bad3:
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
bad2:
crypto_free_tfm(tfm);
crypto_free_blkcipher(tfm);
bad1:
/* Must zero key material before freeing */
memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
@ -693,7 +690,7 @@ static void crypt_dtr(struct dm_target *ti)
kfree(cc->iv_mode);
if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
cc->iv_gen_ops->dtr(cc);
crypto_free_tfm(cc->tfm);
crypto_free_blkcipher(cc->tfm);
dm_put_device(ti, cc->dev);
/* Must zero key material before freeing */
@ -858,18 +855,9 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE:
cipher = crypto_tfm_alg_name(cc->tfm);
cipher = crypto_blkcipher_name(cc->tfm);
switch(cc->tfm->crt_cipher.cit_mode) {
case CRYPTO_TFM_MODE_CBC:
chainmode = "cbc";
break;
case CRYPTO_TFM_MODE_ECB:
chainmode = "ecb";
break;
default:
BUG();
}
chainmode = cc->chainmode;
if (cc->iv_mode)
DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);

View File

@ -43,6 +43,7 @@
* deprecated in 2.6
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
@ -64,12 +65,13 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
MODULE_VERSION("1.0.2");
static void
static unsigned int
setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
{
sg[0].page = virt_to_page(address);
sg[0].offset = offset_in_page(address);
sg[0].length = length;
return length;
}
#define SHA1_PAD_SIZE 40
@ -95,8 +97,8 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
struct crypto_tfm *arc4;
struct crypto_tfm *sha1;
struct crypto_blkcipher *arc4;
struct crypto_hash *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
unsigned char session_key[MPPE_MAX_KEY_LEN];
@ -136,14 +138,21 @@ struct ppp_mppe_state {
*/
static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey)
{
struct hash_desc desc;
struct scatterlist sg[4];
unsigned int nbytes;
setup_sg(&sg[0], state->master_key, state->keylen);
setup_sg(&sg[1], sha_pad->sha_pad1, sizeof(sha_pad->sha_pad1));
setup_sg(&sg[2], state->session_key, state->keylen);
setup_sg(&sg[3], sha_pad->sha_pad2, sizeof(sha_pad->sha_pad2));
nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
sizeof(sha_pad->sha_pad1));
nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
sizeof(sha_pad->sha_pad2));
crypto_digest_digest (state->sha1, sg, 4, state->sha1_digest);
desc.tfm = state->sha1;
desc.flags = 0;
crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
memcpy(InterimKey, state->sha1_digest, state->keylen);
}
@ -156,14 +165,15 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
unsigned char InterimKey[MPPE_MAX_KEY_LEN];
struct scatterlist sg_in[1], sg_out[1];
struct blkcipher_desc desc = { .tfm = state->arc4 };
get_new_key_from_sha(state, InterimKey);
if (!initial_key) {
crypto_cipher_setkey(state->arc4, InterimKey, state->keylen);
crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
setup_sg(sg_in, InterimKey, state->keylen);
setup_sg(sg_out, state->session_key, state->keylen);
if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in,
state->keylen) != 0) {
if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
state->keylen) != 0) {
printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
}
} else {
@ -175,7 +185,7 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
crypto_cipher_setkey(state->arc4, state->session_key, state->keylen);
crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
}
/*
@ -196,15 +206,19 @@ static void *mppe_alloc(unsigned char *options, int optlen)
memset(state, 0, sizeof(*state));
state->arc4 = crypto_alloc_tfm("arc4", 0);
if (!state->arc4)
state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(state->arc4)) {
state->arc4 = NULL;
goto out_free;
}
state->sha1 = crypto_alloc_tfm("sha1", 0);
if (!state->sha1)
state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(state->sha1)) {
state->sha1 = NULL;
goto out_free;
}
digestsize = crypto_tfm_alg_digestsize(state->sha1);
digestsize = crypto_hash_digestsize(state->sha1);
if (digestsize < MPPE_MAX_KEY_LEN)
goto out_free;
@ -229,9 +243,9 @@ static void *mppe_alloc(unsigned char *options, int optlen)
if (state->sha1_digest)
kfree(state->sha1_digest);
if (state->sha1)
crypto_free_tfm(state->sha1);
crypto_free_hash(state->sha1);
if (state->arc4)
crypto_free_tfm(state->arc4);
crypto_free_blkcipher(state->arc4);
kfree(state);
out:
return NULL;
@ -247,9 +261,9 @@ static void mppe_free(void *arg)
if (state->sha1_digest)
kfree(state->sha1_digest);
if (state->sha1)
crypto_free_tfm(state->sha1);
crypto_free_hash(state->sha1);
if (state->arc4)
crypto_free_tfm(state->arc4);
crypto_free_blkcipher(state->arc4);
kfree(state);
}
}
@ -356,6 +370,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
struct blkcipher_desc desc = { .tfm = state->arc4 };
int proto;
struct scatterlist sg_in[1], sg_out[1];
@ -413,7 +428,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
/* Encrypt packet */
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
if (crypto_cipher_encrypt(state->arc4, sg_out, sg_in, isize) != 0) {
if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
return -1;
}
@ -462,6 +477,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
struct blkcipher_desc desc = { .tfm = state->arc4 };
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
int sanity = 0;
@ -599,7 +615,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
*/
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, 1) != 0) {
if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
return DECOMP_ERROR;
}
@ -619,7 +635,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
/* And finally, decrypt the rest of the packet. */
setup_sg(sg_in, ibuf + 1, isize - 1);
setup_sg(sg_out, obuf + 1, osize - 1);
if (crypto_cipher_decrypt(state->arc4, sg_out, sg_in, isize - 1) != 0) {
if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
return DECOMP_ERROR;
}
@ -694,8 +710,8 @@ static struct compressor ppp_mppe = {
static int __init ppp_mppe_init(void)
{
int answer;
if (!(crypto_alg_available("arc4", 0) &&
crypto_alg_available("sha1", 0)))
if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
return -ENODEV;
sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);


@ -19,6 +19,7 @@
======================================================================*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@ -1203,7 +1204,7 @@ struct airo_info {
struct iw_spy_data spy_data;
struct iw_public_data wireless_data;
/* MIC stuff */
struct crypto_tfm *tfm;
struct crypto_cipher *tfm;
mic_module mod[2];
mic_statistics micstats;
HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
@ -1271,7 +1272,8 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev);
static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
static void MoveWindow(miccntx *context, u32 micSeq);
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *);
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
struct crypto_cipher *tfm);
static void emmh32_init(emmh32_context *context);
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
static void emmh32_final(emmh32_context *context, u8 digest[4]);
@ -1339,10 +1341,11 @@ static int micsetup(struct airo_info *ai) {
int i;
if (ai->tfm == NULL)
ai->tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_REQ_MAY_SLEEP);
ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (ai->tfm == NULL) {
if (IS_ERR(ai->tfm)) {
airo_print_err(ai->dev->name, "failed to load transform for AES");
ai->tfm = NULL;
return ERROR;
}
@ -1608,7 +1611,8 @@ static void MoveWindow(miccntx *context, u32 micSeq)
static unsigned char aes_counter[16];
/* expand the key to fill the MMH coefficient array */
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm)
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
struct crypto_cipher *tfm)
{
/* take the keying material, expand if necessary, truncate at 16-bytes */
/* run through AES counter mode to generate context->coeff[] */
@ -1616,7 +1620,6 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
int i,j;
u32 counter;
u8 *cipher, plain[16];
struct scatterlist sg[1];
crypto_cipher_setkey(tfm, pkey, 16);
counter = 0;
@ -1627,9 +1630,8 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
aes_counter[12] = (u8)(counter >> 24);
counter++;
memcpy (plain, aes_counter, 16);
sg_set_buf(sg, plain, 16);
crypto_cipher_encrypt(tfm, sg, sg, 16);
cipher = kmap(sg->page) + sg->offset;
crypto_cipher_encrypt_one(tfm, plain, plain);
cipher = plain;
for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) {
context->coeff[i++] = ntohl(*(u32 *)&cipher[j]);
j += 4;
@ -2432,7 +2434,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
ai->shared, ai->shared_dma);
}
}
crypto_free_tfm(ai->tfm);
crypto_free_cipher(ai->tfm);
del_airo_dev( dev );
free_netdev( dev );
}
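The airo conversion shows the other simplification in this series: single-block cipher operations no longer need a scatterlist and a kmap() round-trip, because crypto_cipher_encrypt_one() works directly on virtual addresses. A sketch of the counter-mode expansion emmh32_setseed() performs, reduced to its core; the counter step is simplified for illustration:

    /* Illustrative: derive keystream blocks by encrypting a counter. */
    static void aes_ctr_keystream(struct crypto_cipher *tfm, u8 counter[16],
                                  u8 *out, int nblocks)
    {
            int i;

            for (i = 0; i < nblocks; i++) {
                    crypto_cipher_encrypt_one(tfm, out + 16 * i, counter);
                    counter[12]++;  /* simplified counter increment */
            }
    }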


@ -26,6 +26,7 @@
* Zhenyu Wang
*/
#include <linux/err.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
@ -107,8 +108,11 @@ iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
u8* crc)
{
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct hash_desc desc;
crypto_digest_digest(tcp_conn->tx_tfm, &buf->sg, 1, crc);
desc.tfm = tcp_conn->tx_tfm;
desc.flags = 0;
crypto_hash_digest(&desc, &buf->sg, buf->sg.length, crc);
buf->sg.length += sizeof(uint32_t);
}
@ -452,11 +456,14 @@ iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
}
if (conn->hdrdgst_en) {
struct hash_desc desc;
struct scatterlist sg;
sg_init_one(&sg, (u8 *)hdr,
sizeof(struct iscsi_hdr) + ahslen);
crypto_digest_digest(tcp_conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
desc.tfm = tcp_conn->rx_tfm;
desc.flags = 0;
crypto_hash_digest(&desc, &sg, sg.length, (u8 *)&cdgst);
rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
ahslen);
if (cdgst != rdgst) {
@ -673,7 +680,7 @@ partial_sg_digest_update(struct iscsi_tcp_conn *tcp_conn,
memcpy(&temp, sg, sizeof(struct scatterlist));
temp.offset = offset;
temp.length = length;
crypto_digest_update(tcp_conn->data_rx_tfm, &temp, 1);
crypto_hash_update(&tcp_conn->data_rx_hash, &temp, length);
}
static void
@ -682,7 +689,7 @@ iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
struct scatterlist tmp;
sg_init_one(&tmp, buf, len);
crypto_digest_update(tcp_conn->data_rx_tfm, &tmp, 1);
crypto_hash_update(&tcp_conn->data_rx_hash, &tmp, len);
}
static int iscsi_scsi_data_in(struct iscsi_conn *conn)
@ -736,9 +743,9 @@ static int iscsi_scsi_data_in(struct iscsi_conn *conn)
if (!rc) {
if (conn->datadgst_en) {
if (!offset)
crypto_digest_update(
tcp_conn->data_rx_tfm,
&sg[i], 1);
crypto_hash_update(
&tcp_conn->data_rx_hash,
&sg[i], sg[i].length);
else
partial_sg_digest_update(tcp_conn,
&sg[i],
@ -877,8 +884,7 @@ more:
rc = iscsi_tcp_hdr_recv(conn);
if (!rc && tcp_conn->in.datalen) {
if (conn->datadgst_en) {
BUG_ON(!tcp_conn->data_rx_tfm);
crypto_digest_init(tcp_conn->data_rx_tfm);
crypto_hash_init(&tcp_conn->data_rx_hash);
}
tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
} else if (rc) {
@ -931,11 +937,11 @@ more:
tcp_conn->in.padding);
memset(pad, 0, tcp_conn->in.padding);
sg_init_one(&sg, pad, tcp_conn->in.padding);
crypto_digest_update(tcp_conn->data_rx_tfm,
&sg, 1);
crypto_hash_update(&tcp_conn->data_rx_hash,
&sg, sg.length);
}
crypto_digest_final(tcp_conn->data_rx_tfm,
(u8 *) & tcp_conn->in.datadgst);
crypto_hash_final(&tcp_conn->data_rx_hash,
(u8 *)&tcp_conn->in.datadgst);
debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
} else
@ -1181,8 +1187,7 @@ iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
{
struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
BUG_ON(!tcp_conn->data_tx_tfm);
crypto_digest_init(tcp_conn->data_tx_tfm);
crypto_hash_init(&tcp_conn->data_tx_hash);
tcp_ctask->digest_count = 4;
}
@ -1196,7 +1201,7 @@ iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
int sent = 0;
if (final)
crypto_digest_final(tcp_conn->data_tx_tfm, (u8*)digest);
crypto_hash_final(&tcp_conn->data_tx_hash, (u8 *)digest);
iscsi_buf_init_iov(buf, (char*)digest, 4);
rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
@ -1491,16 +1496,17 @@ handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
if (rc) {
tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
if (conn->datadgst_en) {
crypto_digest_final(tcp_conn->data_tx_tfm,
(u8*)&tcp_ctask->immdigest);
crypto_hash_final(&tcp_conn->data_tx_hash,
(u8 *)&tcp_ctask->immdigest);
debug_tcp("tx imm sendpage fail 0x%x\n",
tcp_ctask->datadigest);
}
return rc;
}
if (conn->datadgst_en)
crypto_digest_update(tcp_conn->data_tx_tfm,
&tcp_ctask->sendbuf.sg, 1);
crypto_hash_update(&tcp_conn->data_tx_hash,
&tcp_ctask->sendbuf.sg,
tcp_ctask->sendbuf.sg.length);
if (!ctask->imm_count)
break;
@ -1577,8 +1583,8 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
/* will continue with this ctask later.. */
if (conn->datadgst_en) {
crypto_digest_final(tcp_conn->data_tx_tfm,
(u8 *)&dtask->digest);
crypto_hash_final(&tcp_conn->data_tx_hash,
(u8 *)&dtask->digest);
debug_tcp("tx uns data fail 0x%x\n",
dtask->digest);
}
@ -1593,8 +1599,9 @@ handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
* so pass it
*/
if (conn->datadgst_en && tcp_ctask->sent - start > 0)
crypto_digest_update(tcp_conn->data_tx_tfm,
&tcp_ctask->sendbuf.sg, 1);
crypto_hash_update(&tcp_conn->data_tx_hash,
&tcp_ctask->sendbuf.sg,
tcp_ctask->sendbuf.sg.length);
if (!ctask->data_count)
break;
@ -1668,7 +1675,7 @@ solicit_again:
tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
/* will continue with this ctask later.. */
if (conn->datadgst_en) {
crypto_digest_final(tcp_conn->data_tx_tfm,
crypto_hash_final(&tcp_conn->data_tx_hash,
(u8 *)&dtask->digest);
debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
}
@ -1677,8 +1684,8 @@ solicit_again:
BUG_ON(r2t->data_count < 0);
if (conn->datadgst_en)
crypto_digest_update(tcp_conn->data_tx_tfm, &r2t->sendbuf.sg,
1);
crypto_hash_update(&tcp_conn->data_tx_hash, &r2t->sendbuf.sg,
r2t->sendbuf.sg.length);
if (r2t->data_count) {
BUG_ON(ctask->sc->use_sg == 0);
@ -1766,8 +1773,9 @@ handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
}
if (conn->datadgst_en) {
crypto_digest_update(tcp_conn->data_tx_tfm,
&tcp_ctask->sendbuf.sg, 1);
crypto_hash_update(&tcp_conn->data_tx_hash,
&tcp_ctask->sendbuf.sg,
tcp_ctask->sendbuf.sg.length);
/* imm data? */
if (!dtask) {
rc = iscsi_digest_final_send(conn, ctask,
@ -1963,13 +1971,13 @@ iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
/* now free tcp_conn */
if (digest) {
if (tcp_conn->tx_tfm)
crypto_free_tfm(tcp_conn->tx_tfm);
crypto_free_hash(tcp_conn->tx_tfm);
if (tcp_conn->rx_tfm)
crypto_free_tfm(tcp_conn->rx_tfm);
if (tcp_conn->data_tx_tfm)
crypto_free_tfm(tcp_conn->data_tx_tfm);
if (tcp_conn->data_rx_tfm)
crypto_free_tfm(tcp_conn->data_rx_tfm);
crypto_free_hash(tcp_conn->rx_tfm);
if (tcp_conn->data_tx_hash.tfm)
crypto_free_hash(tcp_conn->data_tx_hash.tfm);
if (tcp_conn->data_rx_hash.tfm)
crypto_free_hash(tcp_conn->data_rx_hash.tfm);
}
kfree(tcp_conn);
@ -2130,44 +2138,48 @@ iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
if (conn->hdrdgst_en) {
tcp_conn->hdr_size += sizeof(__u32);
if (!tcp_conn->tx_tfm)
tcp_conn->tx_tfm = crypto_alloc_tfm("crc32c",
0);
if (!tcp_conn->tx_tfm)
return -ENOMEM;
tcp_conn->tx_tfm =
crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(tcp_conn->tx_tfm))
return PTR_ERR(tcp_conn->tx_tfm);
if (!tcp_conn->rx_tfm)
tcp_conn->rx_tfm = crypto_alloc_tfm("crc32c",
0);
if (!tcp_conn->rx_tfm) {
crypto_free_tfm(tcp_conn->tx_tfm);
return -ENOMEM;
tcp_conn->rx_tfm =
crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(tcp_conn->rx_tfm)) {
crypto_free_hash(tcp_conn->tx_tfm);
return PTR_ERR(tcp_conn->rx_tfm);
}
} else {
if (tcp_conn->tx_tfm)
crypto_free_tfm(tcp_conn->tx_tfm);
crypto_free_hash(tcp_conn->tx_tfm);
if (tcp_conn->rx_tfm)
crypto_free_tfm(tcp_conn->rx_tfm);
crypto_free_hash(tcp_conn->rx_tfm);
}
break;
case ISCSI_PARAM_DATADGST_EN:
iscsi_set_param(cls_conn, param, buf, buflen);
if (conn->datadgst_en) {
if (!tcp_conn->data_tx_tfm)
tcp_conn->data_tx_tfm =
crypto_alloc_tfm("crc32c", 0);
if (!tcp_conn->data_tx_tfm)
return -ENOMEM;
if (!tcp_conn->data_rx_tfm)
tcp_conn->data_rx_tfm =
crypto_alloc_tfm("crc32c", 0);
if (!tcp_conn->data_rx_tfm) {
crypto_free_tfm(tcp_conn->data_tx_tfm);
return -ENOMEM;
if (!tcp_conn->data_tx_hash.tfm)
tcp_conn->data_tx_hash.tfm =
crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(tcp_conn->data_tx_hash.tfm))
return PTR_ERR(tcp_conn->data_tx_hash.tfm);
if (!tcp_conn->data_rx_hash.tfm)
tcp_conn->data_rx_hash.tfm =
crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(tcp_conn->data_rx_hash.tfm)) {
crypto_free_hash(tcp_conn->data_tx_hash.tfm);
return PTR_ERR(tcp_conn->data_rx_hash.tfm);
}
} else {
if (tcp_conn->data_tx_tfm)
crypto_free_tfm(tcp_conn->data_tx_tfm);
if (tcp_conn->data_rx_tfm)
crypto_free_tfm(tcp_conn->data_rx_tfm);
if (tcp_conn->data_tx_hash.tfm)
crypto_free_hash(tcp_conn->data_tx_hash.tfm);
if (tcp_conn->data_rx_hash.tfm)
crypto_free_hash(tcp_conn->data_rx_hash.tfm);
}
tcp_conn->sendpage = conn->datadgst_en ?
sock_no_sendpage : tcp_conn->sock->ops->sendpage;
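Where iscsi_tcp previously kept bare digest tfms, it now embeds a hash_desc in the connection state so one init/update/final sequence can span many receive or transmit steps. The incremental pattern, condensed into one illustrative helper:

    /* Illustrative helper: incremental digest over an array of
     * scatterlist entries, mirroring the data_rx_hash usage above. */
    static int hash_over_sgs(struct hash_desc *desc, struct scatterlist *sg,
                             int nsg, u8 *out)
    {
            int i, err;

            err = crypto_hash_init(desc);
            for (i = 0; !err && i < nsg; i++)
                    err = crypto_hash_update(desc, &sg[i], sg[i].length);
            if (!err)
                    err = crypto_hash_final(desc, out);
            return err;
    }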


@ -51,6 +51,7 @@
#define ISCSI_SG_TABLESIZE SG_ALL
#define ISCSI_TCP_MAX_CMD_LEN 16
struct crypto_hash;
struct socket;
/* Socket connection receive helper */
@ -84,8 +85,8 @@ struct iscsi_tcp_conn {
/* iSCSI connection-wide sequencing */
int hdr_size; /* PDU header size */
struct crypto_tfm *rx_tfm; /* CRC32C (Rx) */
struct crypto_tfm *data_rx_tfm; /* CRC32C (Rx) for data */
struct crypto_hash *rx_tfm; /* CRC32C (Rx) */
struct hash_desc data_rx_hash; /* CRC32C (Rx) for data */
/* control data */
struct iscsi_tcp_recv in; /* TCP receive context */
@ -97,8 +98,8 @@ struct iscsi_tcp_conn {
void (*old_write_space)(struct sock *);
/* xmit */
struct crypto_tfm *tx_tfm; /* CRC32C (Tx) */
struct crypto_tfm *data_tx_tfm; /* CRC32C (Tx) for data */
struct crypto_hash *tx_tfm; /* CRC32C (Tx) */
struct hash_desc data_tx_hash; /* CRC32C (Tx) for data */
/* MIB custom statistics */
uint32_t sendpage_failures_cnt;


@ -33,7 +33,7 @@
*
*/
#include <linux/err.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfs4.h>
@ -87,34 +87,35 @@ int
nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
{
struct xdr_netobj cksum;
struct crypto_tfm *tfm;
struct hash_desc desc;
struct scatterlist sg[1];
int status = nfserr_resource;
dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
clname->len, clname->data);
tfm = crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP);
if (tfm == NULL)
goto out;
cksum.len = crypto_tfm_alg_digestsize(tfm);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm))
goto out_no_tfm;
cksum.len = crypto_hash_digestsize(desc.tfm);
cksum.data = kmalloc(cksum.len, GFP_KERNEL);
if (cksum.data == NULL)
goto out;
crypto_digest_init(tfm);
sg[0].page = virt_to_page(clname->data);
sg[0].offset = offset_in_page(clname->data);
sg[0].length = clname->len;
crypto_digest_update(tfm, sg, 1);
crypto_digest_final(tfm, cksum.data);
if (crypto_hash_digest(&desc, sg, sg->length, cksum.data))
goto out;
md5_to_hex(dname, cksum.data);
kfree(cksum.data);
status = nfs_ok;
out:
crypto_free_tfm(tfm);
crypto_free_hash(desc.tfm);
out_no_tfm:
return status;
}

include/crypto/algapi.h — new file, 156 lines

@ -0,0 +1,156 @@
/*
* Cryptographic API for algorithms (i.e., low-level API).
*
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H
#include <linux/crypto.h>
struct module;
struct seq_file;
struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg);
int (*init)(struct crypto_tfm *tfm);
void (*exit)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
};
struct crypto_instance {
struct crypto_alg alg;
struct crypto_template *tmpl;
struct hlist_node list;
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
struct crypto_template {
struct list_head list;
struct hlist_head instances;
struct module *module;
struct crypto_instance *(*alloc)(void *param, unsigned int len);
void (*free)(struct crypto_instance *inst);
char name[CRYPTO_MAX_ALG_NAME];
};
struct crypto_spawn {
struct list_head list;
struct crypto_alg *alg;
struct crypto_instance *inst;
};
struct scatter_walk {
struct scatterlist *sg;
unsigned int offset;
};
struct blkcipher_walk {
union {
struct {
struct page *page;
unsigned long offset;
} phys;
struct {
u8 *page;
u8 *addr;
} virt;
} src, dst;
struct scatter_walk in;
unsigned int nbytes;
struct scatter_walk out;
unsigned int total;
void *page;
u8 *buffer;
u8 *iv;
int flags;
};
extern const struct crypto_type crypto_blkcipher_type;
extern const struct crypto_type crypto_hash_type;
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn);
struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len,
u32 type, u32 mask);
struct crypto_instance *crypto_alloc_instance(const char *name,
struct crypto_alg *alg);
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
struct blkcipher_walk *walk);
static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
unsigned long align = crypto_tfm_alg_alignmask(tfm);
if (align <= crypto_tfm_ctx_alignment())
align = 1;
return (void *)ALIGN(addr, align);
}
static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
return inst->__ctx;
}
static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
return crypto_tfm_ctx_aligned(&tfm->base);
}
static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}
static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
{
return crypto_tfm_ctx_aligned(&tfm->base);
}
static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
walk->in.sg = src;
walk->out.sg = dst;
walk->total = nbytes;
}
#endif /* _CRYPTO_ALGAPI_H */
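The blkcipher_walk declared here is how mode implementations iterate over scatterlists a page at a time. A sketch of the canonical consumer loop, assuming a hypothetical mode implementation; the actual per-block work is elided:

    /* Illustrative skeleton of a blkcipher .encrypt implementation. */
    static int example_encrypt(struct blkcipher_desc *desc,
                               struct scatterlist *dst,
                               struct scatterlist *src, unsigned int nbytes)
    {
            struct blkcipher_walk walk;
            int err;

            blkcipher_walk_init(&walk, dst, src, nbytes);
            err = blkcipher_walk_virt(desc, &walk);

            while (walk.nbytes) {
                    /* process walk.nbytes bytes from walk.src.virt.addr
                     * into walk.dst.virt.addr here; passing 0 below
                     * reports that everything was consumed */
                    err = blkcipher_walk_done(desc, &walk, 0);
            }
            return err;
    }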

include/crypto/twofish.h — new file, 22 lines

@ -0,0 +1,22 @@
#ifndef _CRYPTO_TWOFISH_H
#define _CRYPTO_TWOFISH_H
#include <linux/types.h>
#define TF_MIN_KEY_SIZE 16
#define TF_MAX_KEY_SIZE 32
#define TF_BLOCK_SIZE 16
struct crypto_tfm;
/* Structure for an expanded Twofish key. s contains the key-dependent
* S-boxes composed with the MDS matrix; w contains the eight "whitening"
* subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note
* that k[i] corresponds to what the Twofish paper calls K[i+8]. */
struct twofish_ctx {
u32 s[4][256], w[8], k[32];
};
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
#endif


@ -17,20 +17,36 @@
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H
#include <asm/atomic.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/page.h>
#include <linux/uaccess.h>
/*
* Algorithm masks and types.
*/
#define CRYPTO_ALG_TYPE_MASK 0x000000ff
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_DIGEST 0x00000002
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000004
#define CRYPTO_ALG_TYPE_HASH 0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000005
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
#define CRYPTO_ALG_DEAD 0x00000020
#define CRYPTO_ALG_DYING 0x00000040
#define CRYPTO_ALG_ASYNC 0x00000080
/*
* Set this bit if and only if the algorithm requires another algorithm of
* the same type to handle corner cases.
*/
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
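The narrower type mask works together with per-type masks such as CRYPTO_ALG_TYPE_HASH_MASK: a lookup matches when the algorithm's flags and the requested type agree on every bit of the mask. A worked check, using the values defined above:

    /* An algorithm matches a (type, mask) request when:
     *     ((alg_flags ^ type) & mask) == 0
     * e.g. DIGEST (0x2) and HASH (0x3) differ only in bit 0, so both
     * match CRYPTO_ALG_TYPE_HASH under CRYPTO_ALG_TYPE_HASH_MASK (0xe):
     *     (0x2 ^ 0x3) & 0xe == 0x1 & 0xe == 0
     */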
/*
* Transform masks and values (for crt_flags).
@ -61,8 +77,37 @@
#define CRYPTO_DIR_ENCRYPT 1
#define CRYPTO_DIR_DECRYPT 0
/*
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
* aligned correctly for the given architecture so that there are no alignment
* faults for C data types. In particular, this is required on platforms such
* as arm where pointers are 32-bit aligned but there are data types such as
* u64 which require 64-bit alignment.
*/
#if defined(ARCH_KMALLOC_MINALIGN)
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
#elif defined(ARCH_SLAB_MINALIGN)
#define CRYPTO_MINALIGN ARCH_SLAB_MINALIGN
#endif
#ifdef CRYPTO_MINALIGN
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
#else
#define CRYPTO_MINALIGN_ATTR
#endif
struct scatterlist;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_tfm;
struct crypto_type;
struct blkcipher_desc {
struct crypto_blkcipher *tfm;
void *info;
u32 flags;
};
struct cipher_desc {
struct crypto_tfm *tfm;
@ -72,30 +117,50 @@ struct cipher_desc {
void *info;
};
struct hash_desc {
struct crypto_hash *tfm;
u32 flags;
};
/*
* Algorithms: modular crypto algorithm implementations, managed
* via crypto_register_alg() and crypto_unregister_alg().
*/
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
int (*decrypt)(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
unsigned int min_keysize;
unsigned int max_keysize;
unsigned int ivsize;
};
struct cipher_alg {
unsigned int cia_min_keysize;
unsigned int cia_max_keysize;
int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags);
unsigned int keylen);
void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
unsigned int nbytes);
unsigned int nbytes) __deprecated;
unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
unsigned int nbytes);
unsigned int nbytes) __deprecated;
unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
unsigned int nbytes);
unsigned int nbytes) __deprecated;
unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
u8 *dst, const u8 *src,
unsigned int nbytes);
unsigned int nbytes) __deprecated;
};
struct digest_alg {
@ -105,7 +170,20 @@ struct digest_alg {
unsigned int len);
void (*dia_final)(struct crypto_tfm *tfm, u8 *out);
int (*dia_setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen, u32 *flags);
unsigned int keylen);
};
struct hash_alg {
int (*init)(struct hash_desc *desc);
int (*update)(struct hash_desc *desc, struct scatterlist *sg,
unsigned int nbytes);
int (*final)(struct hash_desc *desc, u8 *out);
int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
unsigned int nbytes, u8 *out);
int (*setkey)(struct crypto_hash *tfm, const u8 *key,
unsigned int keylen);
unsigned int digestsize;
};
struct compress_alg {
@ -115,30 +193,40 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_digest cra_u.digest
#define cra_hash cra_u.hash
#define cra_compress cra_u.compress
struct crypto_alg {
struct list_head cra_list;
struct list_head cra_users;
u32 cra_flags;
unsigned int cra_blocksize;
unsigned int cra_ctxsize;
unsigned int cra_alignmask;
int cra_priority;
atomic_t cra_refcnt;
char cra_name[CRYPTO_MAX_ALG_NAME];
char cra_driver_name[CRYPTO_MAX_ALG_NAME];
const struct crypto_type *cra_type;
union {
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct digest_alg digest;
struct hash_alg hash;
struct compress_alg compress;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
void (*cra_exit)(struct crypto_tfm *tfm);
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
};
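For reference, this is roughly how a driver would fill the new union for a block cipher algorithm; every name and value below is illustrative rather than taken from a real driver, and the setkey/encrypt/decrypt stubs are assumed to exist elsewhere:

    struct example_ctx { u32 round_keys[32]; };

    static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
                              unsigned int keylen);
    static int example_crypt(struct blkcipher_desc *desc,
                             struct scatterlist *dst,
                             struct scatterlist *src, unsigned int nbytes);

    static struct crypto_alg example_alg = {
            .cra_name       = "cbc(example)",
            .cra_flags      = CRYPTO_ALG_TYPE_BLKCIPHER,
            .cra_blocksize  = 16,
            .cra_ctxsize    = sizeof(struct example_ctx),
            .cra_type       = &crypto_blkcipher_type,
            .cra_module     = THIS_MODULE,
            .cra_u          = { .blkcipher = {
                    .min_keysize    = 16,
                    .max_keysize    = 32,
                    .ivsize         = 16,
                    .setkey         = example_setkey,
                    .encrypt        = example_crypt,
                    .decrypt        = example_crypt,
            } },
    };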
@ -153,20 +241,39 @@ int crypto_unregister_alg(struct crypto_alg *alg);
* Algorithm query interface.
*/
#ifdef CONFIG_CRYPTO
int crypto_alg_available(const char *name, u32 flags);
int crypto_alg_available(const char *name, u32 flags)
__deprecated_for_modules;
int crypto_has_alg(const char *name, u32 type, u32 mask);
#else
static int crypto_alg_available(const char *name, u32 flags)
__deprecated_for_modules;
static inline int crypto_alg_available(const char *name, u32 flags)
{
return 0;
}
static inline int crypto_has_alg(const char *name, u32 type, u32 mask)
{
return 0;
}
#endif
/*
* Transforms: user-instantiated objects which encapsulate algorithms
* and core processing logic. Managed via crypto_alloc_tfm() and
* crypto_free_tfm(), as well as the various helpers below.
* and core processing logic. Managed via crypto_alloc_*() and
* crypto_free_*(), as well as the various helpers below.
*/
struct blkcipher_tfm {
void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes);
};
struct cipher_tfm {
void *cit_iv;
unsigned int cit_ivsize;
@ -190,20 +297,20 @@ struct cipher_tfm {
struct scatterlist *src,
unsigned int nbytes, u8 *iv);
void (*cit_xor_block)(u8 *dst, const u8 *src);
void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};
struct digest_tfm {
void (*dit_init)(struct crypto_tfm *tfm);
void (*dit_update)(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg);
void (*dit_final)(struct crypto_tfm *tfm, u8 *out);
void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
unsigned int nsg, u8 *out);
int (*dit_setkey)(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen);
#ifdef CONFIG_CRYPTO_HMAC
void *dit_hmac_block;
#endif
struct hash_tfm {
int (*init)(struct hash_desc *desc);
int (*update)(struct hash_desc *desc,
struct scatterlist *sg, unsigned int nsg);
int (*final)(struct hash_desc *desc, u8 *out);
int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
unsigned int nsg, u8 *out);
int (*setkey)(struct crypto_hash *tfm, const u8 *key,
unsigned int keylen);
unsigned int digestsize;
};
struct compress_tfm {
@ -215,8 +322,9 @@ struct compress_tfm {
u8 *dst, unsigned int *dlen);
};
#define crt_blkcipher crt_u.blkcipher
#define crt_cipher crt_u.cipher
#define crt_digest crt_u.digest
#define crt_hash crt_u.hash
#define crt_compress crt_u.compress
struct crypto_tfm {
@ -224,30 +332,43 @@ struct crypto_tfm {
u32 crt_flags;
union {
struct blkcipher_tfm blkcipher;
struct cipher_tfm cipher;
struct digest_tfm digest;
struct hash_tfm hash;
struct compress_tfm compress;
} crt_u;
struct crypto_alg *__crt_alg;
char __crt_ctx[] __attribute__ ((__aligned__));
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
#define crypto_cipher crypto_tfm
#define crypto_comp crypto_tfm
struct crypto_blkcipher {
struct crypto_tfm base;
};
struct crypto_hash {
struct crypto_tfm base;
};
enum {
CRYPTOA_UNSPEC,
CRYPTOA_ALG,
};
struct crypto_attr_alg {
char name[CRYPTO_MAX_ALG_NAME];
};
/*
* Transform user interface.
*/
/*
* crypto_alloc_tfm() will first attempt to locate an already loaded algorithm.
* If that fails and the kernel supports dynamically loadable modules, it
* will then attempt to load a module of the same name or alias. A refcount
* is grabbed on the algorithm which is then associated with the new transform.
*
* crypto_free_tfm() frees up the transform and any associated resources,
* then drops the refcount on the associated algorithm.
*/
struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_free_tfm(struct crypto_tfm *tfm);
/*
@ -258,6 +379,16 @@ static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_name;
}
static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_driver_name;
}
static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_priority;
}
static inline const char *crypto_tfm_alg_modname(struct crypto_tfm *tfm)
{
return module_name(tfm->__crt_alg->cra_module);
@ -268,18 +399,23 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}
static unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
__deprecated;
static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->__crt_alg->cra_cipher.cia_min_keysize;
}
static unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
__deprecated;
static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->__crt_alg->cra_cipher.cia_max_keysize;
}
static unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) __deprecated;
static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
@ -302,6 +438,21 @@ static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_alignmask;
}
static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
return tfm->crt_flags;
}
static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
tfm->crt_flags |= flags;
}
static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
tfm->crt_flags &= ~flags;
}
static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
return tfm->__crt_ctx;
@ -316,50 +467,374 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
/*
* API wrappers.
*/
static inline void crypto_digest_init(struct crypto_tfm *tfm)
static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
struct crypto_tfm *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
tfm->crt_digest.dit_init(tfm);
return (struct crypto_blkcipher *)tfm;
}
static inline void crypto_digest_update(struct crypto_tfm *tfm,
struct scatterlist *sg,
unsigned int nsg)
static inline struct crypto_blkcipher *crypto_blkcipher_cast(
struct crypto_tfm *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
tfm->crt_digest.dit_update(tfm, sg, nsg);
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
return __crypto_blkcipher_cast(tfm);
}
static inline void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
const char *alg_name, u32 type, u32 mask)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
tfm->crt_digest.dit_final(tfm, out);
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline void crypto_digest_digest(struct crypto_tfm *tfm,
struct scatterlist *sg,
unsigned int nsg, u8 *out)
static inline struct crypto_tfm *crypto_blkcipher_tfm(
struct crypto_blkcipher *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
tfm->crt_digest.dit_digest(tfm, sg, nsg, out);
return &tfm->base;
}
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return crypto_has_alg(alg_name, type, mask);
}
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
}
static inline struct blkcipher_tfm *crypto_blkcipher_crt(
struct crypto_blkcipher *tfm)
{
return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
}
static inline struct blkcipher_alg *crypto_blkcipher_alg(
struct crypto_blkcipher *tfm)
{
return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
return crypto_blkcipher_alg(tfm)->ivsize;
}
static inline unsigned int crypto_blkcipher_blocksize(
struct crypto_blkcipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
}
static inline unsigned int crypto_blkcipher_alignmask(
struct crypto_blkcipher *tfm)
{
return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}
static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}
static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
u32 flags)
{
crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}
static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
u32 flags)
{
crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
const u8 *key, unsigned int keylen)
{
return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
key, keylen);
}
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes)
{
return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
const u8 *src, unsigned int len)
{
memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
u8 *dst, unsigned int len)
{
memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}
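Note the split above: plain encrypt/decrypt first copy the transform's built-in IV into desc->info, while the _iv variants leave desc->info alone so the caller can supply a per-call IV. A sketch of the explicit-IV form, with an illustrative helper name:

    /* Illustrative: CBC encryption with a caller-provided IV. */
    static int cbc_encrypt_with_iv(struct crypto_blkcipher *tfm, u8 *iv,
                                   struct scatterlist *dst,
                                   struct scatterlist *src,
                                   unsigned int nbytes)
    {
            struct blkcipher_desc desc = { .tfm = tfm, .info = iv };

            return crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);
    }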
static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
return (struct crypto_cipher *)tfm;
}
static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return __crypto_cipher_cast(tfm);
}
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_CIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
return tfm;
}
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
crypto_free_tfm(crypto_cipher_tfm(tfm));
}
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_CIPHER;
mask |= CRYPTO_ALG_TYPE_MASK;
return crypto_has_alg(alg_name, type, mask);
}
static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
return &crypto_cipher_tfm(tfm)->crt_cipher;
}
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}
static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}
static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}
static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
u32 flags)
{
crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}
static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
u32 flags)
{
crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
const u8 *key, unsigned int keylen)
{
return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
key, keylen);
}
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
dst, src);
}
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
u8 *dst, const u8 *src)
{
crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
dst, src);
}
void crypto_digest_init(struct crypto_tfm *tfm) __deprecated_for_modules;
void crypto_digest_update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg)
__deprecated_for_modules;
void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
__deprecated_for_modules;
void crypto_digest_digest(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg, u8 *out)
__deprecated_for_modules;
static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
{
return (struct crypto_hash *)tfm;
}
static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
{
BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
CRYPTO_ALG_TYPE_HASH_MASK);
return __crypto_hash_cast(tfm);
}
static int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen) __deprecated;
static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
if (tfm->crt_digest.dit_setkey == NULL)
return -ENOSYS;
return tfm->crt_digest.dit_setkey(tfm, key, keylen);
return tfm->crt_hash.setkey(crypto_hash_cast(tfm), key, keylen);
}
static inline int crypto_cipher_setkey(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
u32 type, u32 mask)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
return tfm->crt_cipher.cit_setkey(tfm, key, keylen);
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_HASH;
mask |= CRYPTO_ALG_TYPE_HASH_MASK;
return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
{
return &tfm->base;
}
static inline void crypto_free_hash(struct crypto_hash *tfm)
{
crypto_free_tfm(crypto_hash_tfm(tfm));
}
static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_HASH;
mask |= CRYPTO_ALG_TYPE_HASH_MASK;
return crypto_has_alg(alg_name, type, mask);
}
static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
{
return &crypto_hash_tfm(tfm)->crt_hash;
}
static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
{
return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
}
static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
{
return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
}
static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
{
return crypto_hash_crt(tfm)->digestsize;
}
static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
{
return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
}
static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
{
crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
}
static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
{
crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
}
static inline int crypto_hash_init(struct hash_desc *desc)
{
return crypto_hash_crt(desc->tfm)->init(desc);
}
static inline int crypto_hash_update(struct hash_desc *desc,
struct scatterlist *sg,
unsigned int nbytes)
{
return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
}
static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
{
return crypto_hash_crt(desc->tfm)->final(desc, out);
}
static inline int crypto_hash_digest(struct hash_desc *desc,
struct scatterlist *sg,
unsigned int nbytes, u8 *out)
{
return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
}
static inline int crypto_hash_setkey(struct crypto_hash *hash,
const u8 *key, unsigned int keylen)
{
return crypto_hash_crt(hash)->setkey(hash, key, keylen);
}
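With HMAC reworked as a template (note the "hmac(md5)"/"hmac(sha1)" names SCTP adopts below), keyed hashing goes through this same interface: allocate the wrapped algorithm by name, set the key, digest. A sketch with an illustrative helper name:

    static int hmac_sha1_buf(const u8 *key, unsigned int keylen,
                             struct scatterlist *sg, unsigned int nbytes,
                             u8 *out)
    {
            struct hash_desc desc;
            int err;

            desc.tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(desc.tfm))
                    return PTR_ERR(desc.tfm);
            desc.flags = 0;

            err = crypto_hash_setkey(desc.tfm, key, keylen);
            if (!err)
                    err = crypto_hash_digest(&desc, sg, nbytes, out);

            crypto_free_hash(desc.tfm);
            return err;
    }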
static int crypto_cipher_encrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes) __deprecated;
static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
@ -369,16 +844,23 @@ static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
}
static int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, u8 *iv) __deprecated;
static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, u8 *iv)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv);
}
static int crypto_cipher_decrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes) __deprecated;
static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
@ -388,16 +870,21 @@ static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
}
static int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, u8 *iv) __deprecated;
static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, u8 *iv)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
BUG_ON(tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB);
return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv);
}
static void crypto_cipher_set_iv(struct crypto_tfm *tfm,
const u8 *src, unsigned int len) __deprecated;
static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
const u8 *src, unsigned int len)
{
@ -405,6 +892,8 @@ static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
memcpy(tfm->crt_cipher.cit_iv, src, len);
}
static void crypto_cipher_get_iv(struct crypto_tfm *tfm,
u8 *dst, unsigned int len) __deprecated;
static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
u8 *dst, unsigned int len)
{
@ -412,34 +901,70 @@ static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
memcpy(dst, tfm->crt_cipher.cit_iv, len);
}
static inline int crypto_comp_compress(struct crypto_tfm *tfm,
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
}
static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
CRYPTO_ALG_TYPE_MASK);
return __crypto_comp_cast(tfm);
}
static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_COMPRESS;
mask |= CRYPTO_ALG_TYPE_MASK;
return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}
static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
return tfm;
}
static inline void crypto_free_comp(struct crypto_comp *tfm)
{
crypto_free_tfm(crypto_comp_tfm(tfm));
}
static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_COMPRESS;
mask |= CRYPTO_ALG_TYPE_MASK;
return crypto_has_alg(alg_name, type, mask);
}
static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}
static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
return &crypto_comp_tfm(tfm)->crt_compress;
}
static inline int crypto_comp_compress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
return tfm->crt_compress.cot_compress(tfm, src, slen, dst, dlen);
return crypto_comp_crt(tfm)->cot_compress(tfm, src, slen, dst, dlen);
}
static inline int crypto_comp_decompress(struct crypto_tfm *tfm,
static inline int crypto_comp_decompress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_COMPRESS);
return tfm->crt_compress.cot_decompress(tfm, src, slen, dst, dlen);
return crypto_comp_crt(tfm)->cot_decompress(tfm, src, slen, dst, dlen);
}
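crypto_comp gets the same treatment even though it is still a #define to crypto_tfm underneath: allocation, capability tests, and calls all go through the typed wrappers. A sketch — "deflate" is the compressor IPComp uses, and the helper name is illustrative:

    static int deflate_buf(const u8 *src, unsigned int slen,
                           u8 *dst, unsigned int *dlen)
    {
            struct crypto_comp *tfm;
            int err;

            tfm = crypto_alloc_comp("deflate", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_comp_compress(tfm, src, slen, dst, dlen);
            crypto_free_comp(tfm);
            return err;
    }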
/*
* HMAC support.
*/
#ifdef CONFIG_CRYPTO_HMAC
void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen);
void crypto_hmac_update(struct crypto_tfm *tfm,
struct scatterlist *sg, unsigned int nsg);
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
unsigned int *keylen, u8 *out);
void crypto_hmac(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen,
struct scatterlist *sg, unsigned int nsg, u8 *out);
#endif /* CONFIG_CRYPTO_HMAC */
#endif /* _LINUX_CRYPTO_H */


@ -5,7 +5,7 @@
#include <linux/mm.h>
#include <linux/string.h>
static inline void sg_set_buf(struct scatterlist *sg, void *buf,
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
sg->page = virt_to_page(buf);
@ -13,7 +13,7 @@ static inline void sg_set_buf(struct scatterlist *sg, void *buf,
sg->length = buflen;
}
static inline void sg_init_one(struct scatterlist *sg, void *buf,
static inline void sg_init_one(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
memset(sg, 0, sizeof(*sg));


@ -46,8 +46,8 @@ struct krb5_ctx {
unsigned char seed[16];
int signalg;
int sealalg;
struct crypto_tfm *enc;
struct crypto_tfm *seq;
struct crypto_blkcipher *enc;
struct crypto_blkcipher *seq;
s32 endtime;
u32 seq_send;
struct xdr_netobj mech_used;
@ -136,26 +136,27 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
u32
krb5_encrypt(struct crypto_tfm * key,
krb5_encrypt(struct crypto_blkcipher *key,
void *iv, void *in, void *out, int length);
u32
krb5_decrypt(struct crypto_tfm * key,
krb5_decrypt(struct crypto_blkcipher *key,
void *iv, void *in, void *out, int length);
int
gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset,
struct page **pages);
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf,
int offset, struct page **pages);
int
gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset);
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
int offset);
s32
krb5_make_seq_num(struct crypto_tfm * key,
krb5_make_seq_num(struct crypto_blkcipher *key,
int direction,
s32 seqnum, unsigned char *cksum, unsigned char *buf);
s32
krb5_get_seq_num(struct crypto_tfm * key,
krb5_get_seq_num(struct crypto_blkcipher *key,
unsigned char *cksum,
unsigned char *buf, int *direction, s32 * seqnum);


@ -19,9 +19,9 @@ struct spkm3_ctx {
unsigned int req_flags;
struct xdr_netobj share_key;
int conf_alg;
struct crypto_tfm* derived_conf_key;
struct crypto_blkcipher *derived_conf_key;
int intg_alg;
struct crypto_tfm* derived_integ_key;
struct crypto_blkcipher *derived_integ_key;
int keyestb_alg; /* alg used to get share_key */
int owf_alg; /* one way function */
};


@ -1,6 +1,7 @@
#ifndef _NET_AH_H
#define _NET_AH_H
#include <linux/crypto.h>
#include <net/xfrm.h>
/* This is the maximum truncated ICV length that we know of. */
@ -14,22 +15,29 @@ struct ah_data
int icv_full_len;
int icv_trunc_len;
void (*icv)(struct ah_data*,
struct sk_buff *skb, u8 *icv);
struct crypto_tfm *tfm;
struct crypto_hash *tfm;
};
static inline void
ah_hmac_digest(struct ah_data *ahp, struct sk_buff *skb, u8 *auth_data)
static inline int ah_mac_digest(struct ah_data *ahp, struct sk_buff *skb,
u8 *auth_data)
{
struct crypto_tfm *tfm = ahp->tfm;
struct hash_desc desc;
int err;
desc.tfm = ahp->tfm;
desc.flags = 0;
memset(auth_data, 0, ahp->icv_trunc_len);
crypto_hmac_init(tfm, ahp->key, &ahp->key_len);
skb_icv_walk(skb, tfm, 0, skb->len, crypto_hmac_update);
crypto_hmac_final(tfm, ahp->key, &ahp->key_len, ahp->work_icv);
memcpy(auth_data, ahp->work_icv, ahp->icv_trunc_len);
err = crypto_hash_init(&desc);
if (unlikely(err))
goto out;
err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update);
if (unlikely(err))
goto out;
err = crypto_hash_final(&desc, ahp->work_icv);
out:
return err;
}
#endif


@ -1,6 +1,7 @@
#ifndef _NET_ESP_H
#define _NET_ESP_H
#include <linux/crypto.h>
#include <net/xfrm.h>
#include <asm/scatterlist.h>
@ -21,7 +22,7 @@ struct esp_data
* >= crypto_tfm_alg_ivsize(tfm). */
int ivlen;
int padlen; /* 0..255 */
struct crypto_tfm *tfm; /* crypto handle */
struct crypto_blkcipher *tfm; /* crypto handle */
} conf;
/* Integrity. It is active when icv_full_len != 0 */
@ -34,7 +35,7 @@ struct esp_data
void (*icv)(struct esp_data*,
struct sk_buff *skb,
int offset, int len, u8 *icv);
struct crypto_tfm *tfm;
struct crypto_hash *tfm;
} auth;
};
@ -42,18 +43,22 @@ extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
static inline void
esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset,
int len, u8 *auth_data)
static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb,
int offset, int len)
{
struct crypto_tfm *tfm = esp->auth.tfm;
char *icv = esp->auth.work_icv;
struct hash_desc desc;
int err;
memset(auth_data, 0, esp->auth.icv_trunc_len);
crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len);
skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);
crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv);
memcpy(auth_data, icv, esp->auth.icv_trunc_len);
desc.tfm = esp->auth.tfm;
desc.flags = 0;
err = crypto_hash_init(&desc);
if (unlikely(err))
return err;
err = skb_icv_walk(skb, &desc, offset, len, crypto_hash_update);
if (unlikely(err))
return err;
return crypto_hash_final(&desc, esp->auth.work_icv);
}
#endif


@ -1,11 +1,14 @@
#ifndef _NET_IPCOMP_H
#define _NET_IPCOMP_H
#include <linux/crypto.h>
#include <linux/types.h>
#define IPCOMP_SCRATCH_SIZE 65400
struct ipcomp_data {
u16 threshold;
struct crypto_tfm **tfms;
struct crypto_comp **tfms;
};
#endif


@ -312,9 +312,9 @@ enum { SCTP_MAX_GABS = 16 };
*/
#if defined (CONFIG_SCTP_HMAC_MD5)
#define SCTP_COOKIE_HMAC_ALG "md5"
#define SCTP_COOKIE_HMAC_ALG "hmac(md5)"
#elif defined (CONFIG_SCTP_HMAC_SHA1)
#define SCTP_COOKIE_HMAC_ALG "sha1"
#define SCTP_COOKIE_HMAC_ALG "hmac(sha1)"
#else
#define SCTP_COOKIE_HMAC_ALG NULL
#endif


@ -330,17 +330,6 @@ static inline void sctp_v6_exit(void) { return; }
#endif /* #if defined(CONFIG_IPV6) */
/* Some wrappers, in case crypto not available. */
#if defined (CONFIG_CRYPTO_HMAC)
#define sctp_crypto_alloc_tfm crypto_alloc_tfm
#define sctp_crypto_free_tfm crypto_free_tfm
#define sctp_crypto_hmac crypto_hmac
#else
#define sctp_crypto_alloc_tfm(x...) NULL
#define sctp_crypto_free_tfm(x...)
#define sctp_crypto_hmac(x...)
#endif
/* Map an association to an assoc_id. */
static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc)


@ -87,6 +87,7 @@ struct sctp_bind_addr;
struct sctp_ulpq;
struct sctp_ep_common;
struct sctp_ssnmap;
struct crypto_hash;
#include <net/sctp/tsnmap.h>
@ -264,7 +265,7 @@ struct sctp_sock {
struct sctp_pf *pf;
/* Access to HMAC transform. */
struct crypto_tfm *hmac;
struct crypto_hash *hmac;
/* What is our base endpointer? */
struct sctp_endpoint *ep;


@ -8,7 +8,6 @@
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/in6.h>
#include <linux/mutex.h>
@ -855,6 +854,7 @@ struct xfrm_algo_comp_info {
struct xfrm_algo_desc {
char *name;
char *compat;
u8 available:1;
union {
struct xfrm_algo_auth_info auth;
@ -984,11 +984,13 @@ extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
struct crypto_tfm;
typedef void (icv_update_fn_t)(struct crypto_tfm *, struct scatterlist *, unsigned int);
struct hash_desc;
struct scatterlist;
typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
unsigned int);
extern void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
int offset, int len, icv_update_fn_t icv_update);
extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
int offset, int len, icv_update_fn_t icv_update);
static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
int family)


@ -9,6 +9,7 @@
* more details.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@ -48,7 +49,7 @@ struct ieee80211_ccmp_data {
int key_idx;
struct crypto_tfm *tfm;
struct crypto_cipher *tfm;
/* scratch buffers for virt_to_page() (crypto API) */
u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
@ -56,20 +57,10 @@ struct ieee80211_ccmp_data {
u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
};
static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
const u8 pt[16], u8 ct[16])
static inline void ieee80211_ccmp_aes_encrypt(struct crypto_cipher *tfm,
const u8 pt[16], u8 ct[16])
{
struct scatterlist src, dst;
src.page = virt_to_page(pt);
src.offset = offset_in_page(pt);
src.length = AES_BLOCK_LEN;
dst.page = virt_to_page(ct);
dst.offset = offset_in_page(ct);
dst.length = AES_BLOCK_LEN;
crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN);
crypto_cipher_encrypt_one(tfm, ct, pt);
}
static void *ieee80211_ccmp_init(int key_idx)
@ -81,10 +72,11 @@ static void *ieee80211_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
priv->tfm = crypto_alloc_tfm("aes", 0);
if (priv->tfm == NULL) {
priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
"crypto API aes\n");
priv->tfm = NULL;
goto fail;
}
@ -93,7 +85,7 @@ static void *ieee80211_ccmp_init(int key_idx)
fail:
if (priv) {
if (priv->tfm)
crypto_free_tfm(priv->tfm);
crypto_free_cipher(priv->tfm);
kfree(priv);
}
@ -104,7 +96,7 @@ static void ieee80211_ccmp_deinit(void *priv)
{
struct ieee80211_ccmp_data *_priv = priv;
if (_priv && _priv->tfm)
crypto_free_tfm(_priv->tfm);
crypto_free_cipher(_priv->tfm);
kfree(priv);
}
@ -115,7 +107,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len)
b[i] ^= a[i];
}
static void ccmp_init_blocks(struct crypto_tfm *tfm,
static void ccmp_init_blocks(struct crypto_cipher *tfm,
struct ieee80211_hdr_4addr *hdr,
u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0)
{
@ -377,7 +369,7 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv)
{
struct ieee80211_ccmp_data *data = priv;
int keyidx;
struct crypto_tfm *tfm = data->tfm;
struct crypto_cipher *tfm = data->tfm;
keyidx = data->key_idx;
memset(data, 0, sizeof(*data));
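
Single-block AES no longer needs scatterlists: crypto_cipher_encrypt_one()
operates on plain buffers, which is what lets the wrapper above collapse
to one line. Sketch of the handle's lifecycle (key, pt and ct are
placeholders; the 16-byte AES-128 key is an assumption):

    struct crypto_cipher *tfm;
    int err;

    tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    err = crypto_cipher_setkey(tfm, key, 16);    /* AES-128 key */
    if (!err)
        crypto_cipher_encrypt_one(tfm, ct, pt);  /* one 16-byte block */

    crypto_free_cipher(tfm);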


@ -9,6 +9,7 @@
* more details.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@ -52,8 +53,8 @@ struct ieee80211_tkip_data {
int key_idx;
struct crypto_tfm *tfm_arc4;
struct crypto_tfm *tfm_michael;
struct crypto_blkcipher *tfm_arc4;
struct crypto_hash *tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16], tx_hdr[16];
@ -85,17 +86,21 @@ static void *ieee80211_tkip_init(int key_idx)
priv->key_idx = key_idx;
priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0);
if (priv->tfm_arc4 == NULL) {
priv->tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
priv->tfm_arc4 = NULL;
goto fail;
}
priv->tfm_michael = crypto_alloc_tfm("michael_mic", 0);
if (priv->tfm_michael == NULL) {
priv->tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm_michael)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API michael_mic\n");
priv->tfm_michael = NULL;
goto fail;
}
@ -104,9 +109,9 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
if (priv->tfm_michael)
crypto_free_tfm(priv->tfm_michael);
crypto_free_hash(priv->tfm_michael);
if (priv->tfm_arc4)
crypto_free_tfm(priv->tfm_arc4);
crypto_free_blkcipher(priv->tfm_arc4);
kfree(priv);
}
@ -117,9 +122,9 @@ static void ieee80211_tkip_deinit(void *priv)
{
struct ieee80211_tkip_data *_priv = priv;
if (_priv && _priv->tfm_michael)
crypto_free_tfm(_priv->tfm_michael);
crypto_free_hash(_priv->tfm_michael);
if (_priv && _priv->tfm_arc4)
crypto_free_tfm(_priv->tfm_arc4);
crypto_free_blkcipher(_priv->tfm_arc4);
kfree(priv);
}
@ -318,6 +323,7 @@ static int ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
struct blkcipher_desc desc = { .tfm = tkey->tfm_arc4 };
int len;
u8 rc4key[16], *pos, *icv;
u32 crc;
@ -351,18 +357,17 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
crypto_blkcipher_setkey(tkey->tfm_arc4, rc4key, 16);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
sg.length = len + 4;
crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4);
return 0;
return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
}
static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
struct blkcipher_desc desc = { .tfm = tkey->tfm_arc4 };
u8 rc4key[16];
u8 keyidx, *pos;
u32 iv32;
@ -434,11 +439,18 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
crypto_blkcipher_setkey(tkey->tfm_arc4, rc4key, 16);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
sg.length = plen + 4;
crypto_cipher_decrypt(tkey->tfm_arc4, &sg, &sg, plen + 4);
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
if (net_ratelimit()) {
printk(KERN_DEBUG ": TKIP: failed to decrypt "
"received packet from " MAC_FMT "\n",
MAC_ARG(hdr->addr2));
}
return -7;
}
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
@ -475,6 +487,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
u8 * data, size_t data_len, u8 * mic)
{
struct hash_desc desc;
struct scatterlist sg[2];
if (tkey->tfm_michael == NULL) {
@ -489,12 +502,12 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
sg[1].offset = offset_in_page(data);
sg[1].length = data_len;
crypto_digest_init(tkey->tfm_michael);
crypto_digest_setkey(tkey->tfm_michael, key, 8);
crypto_digest_update(tkey->tfm_michael, sg, 2);
crypto_digest_final(tkey->tfm_michael, mic);
if (crypto_hash_setkey(tkey->tfm_michael, key, 8))
return -1;
return 0;
desc.tfm = tkey->tfm_michael;
desc.flags = 0;
return crypto_hash_digest(&desc, sg, data_len + 16, mic);
}
static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
@ -618,8 +631,8 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_tfm *tfm = tkey->tfm_michael;
struct crypto_tfm *tfm2 = tkey->tfm_arc4;
struct crypto_hash *tfm = tkey->tfm_michael;
struct crypto_blkcipher *tfm2 = tkey->tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
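
arc4 is a stream cipher, so the new API requests it as "ecb(arc4)" and
drives it through a blkcipher_desc; encrypt and decrypt now return a
status that the callers propagate (hence the new -7 error path above).
Illustrative sketch of one in-place pass (rc4key, data and len are
placeholders):

    struct crypto_blkcipher *arc4;
    struct blkcipher_desc desc;
    struct scatterlist sg;
    int err;

    arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(arc4))
        return PTR_ERR(arc4);

    desc.tfm = arc4;
    desc.flags = 0;

    crypto_blkcipher_setkey(arc4, rc4key, 16);  /* per-packet RC4 key */
    sg_init_one(&sg, data, len);                /* src == dst: in place */
    err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);

    crypto_free_blkcipher(arc4);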


@ -9,6 +9,7 @@
* more details.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@ -32,7 +33,7 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
struct crypto_tfm *tfm;
struct crypto_blkcipher *tfm;
};
static void *prism2_wep_init(int keyidx)
@ -44,10 +45,11 @@ static void *prism2_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
priv->tfm = crypto_alloc_tfm("arc4", 0);
if (priv->tfm == NULL) {
priv->tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
"crypto API arc4\n");
priv->tfm = NULL;
goto fail;
}
@ -59,7 +61,7 @@ static void *prism2_wep_init(int keyidx)
fail:
if (priv) {
if (priv->tfm)
crypto_free_tfm(priv->tfm);
crypto_free_blkcipher(priv->tfm);
kfree(priv);
}
return NULL;
@ -69,7 +71,7 @@ static void prism2_wep_deinit(void *priv)
{
struct prism2_wep_data *_priv = priv;
if (_priv && _priv->tfm)
crypto_free_tfm(_priv->tfm);
crypto_free_blkcipher(_priv->tfm);
kfree(priv);
}
@ -120,6 +122,7 @@ static int prism2_wep_build_iv(struct sk_buff *skb, int hdr_len,
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct prism2_wep_data *wep = priv;
struct blkcipher_desc desc = { .tfm = wep->tfm };
u32 crc, klen, len;
u8 *pos, *icv;
struct scatterlist sg;
@ -151,13 +154,11 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
crypto_cipher_setkey(wep->tfm, key, klen);
crypto_blkcipher_setkey(wep->tfm, key, klen);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
sg.length = len + 4;
crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4);
return 0;
return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
}
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
@ -170,6 +171,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct prism2_wep_data *wep = priv;
struct blkcipher_desc desc = { .tfm = wep->tfm };
u32 crc, klen, plen;
u8 key[WEP_KEY_LEN + 3];
u8 keyidx, *pos, icv[4];
@ -194,11 +196,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
/* Apply RC4 to data and compute CRC32 over decrypted data */
plen = skb->len - hdr_len - 8;
crypto_cipher_setkey(wep->tfm, key, klen);
crypto_blkcipher_setkey(wep->tfm, key, klen);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
sg.length = plen + 4;
crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4);
if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
return -7;
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;


@ -386,6 +386,7 @@ config INET_ESP
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
select CRYPTO_CBC
select CRYPTO_SHA1
select CRYPTO_DES
---help---


@ -1,3 +1,4 @@
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
@ -97,7 +98,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah->spi = x->id.spi;
ah->seq_no = htonl(++x->replay.oseq);
xfrm_aevent_doreplay(x);
ahp->icv(ahp, skb, ah->auth_data);
err = ah_mac_digest(ahp, skb, ah->auth_data);
if (err)
goto error;
memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
@ -119,6 +123,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ah_hlen;
int ihl;
int err = -EINVAL;
struct iphdr *iph;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
@ -166,8 +171,11 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
skb_push(skb, ihl);
ahp->icv(ahp, skb, ah->auth_data);
if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
err = ah_mac_digest(ahp, skb, ah->auth_data);
if (err)
goto out;
err = -EINVAL;
if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
x->stats.integrity_failed++;
goto out;
}
@ -179,7 +187,7 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
return 0;
out:
return -EINVAL;
return err;
}
static void ah4_err(struct sk_buff *skb, u32 info)
@ -204,6 +212,7 @@ static int ah_init_state(struct xfrm_state *x)
{
struct ah_data *ahp = NULL;
struct xfrm_algo_desc *aalg_desc;
struct crypto_hash *tfm;
if (!x->aalg)
goto error;
@ -221,24 +230,27 @@ static int ah_init_state(struct xfrm_state *x)
ahp->key = x->aalg->alg_key;
ahp->key_len = (x->aalg->alg_key_len+7)/8;
ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (!ahp->tfm)
tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto error;
ahp->tfm = tfm;
if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
goto error;
ahp->icv = ah_hmac_digest;
/*
* Lookup the algorithm description maintained by xfrm_algo,
* verify crypto transform properties, and store information
* we need for AH processing. This lookup cannot fail here
* after a successful crypto_alloc_tfm().
* after a successful crypto_alloc_hash().
*/
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_tfm_alg_digestsize(ahp->tfm)) {
crypto_hash_digestsize(tfm)) {
printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm),
x->aalg->alg_name, crypto_hash_digestsize(tfm),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
@ -262,7 +274,7 @@ static int ah_init_state(struct xfrm_state *x)
error:
if (ahp) {
kfree(ahp->work_icv);
crypto_free_tfm(ahp->tfm);
crypto_free_hash(ahp->tfm);
kfree(ahp);
}
return -EINVAL;
@ -277,7 +289,7 @@ static void ah_destroy(struct xfrm_state *x)
kfree(ahp->work_icv);
ahp->work_icv = NULL;
crypto_free_tfm(ahp->tfm);
crypto_free_hash(ahp->tfm);
ahp->tfm = NULL;
kfree(ahp);
}
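
The indirect ahp->icv() call gives way to ah_mac_digest(), which leaves
the MAC in the preallocated ahp->work_icv scratch buffer for the caller
to copy or compare. A hedged sketch of what such a helper boils down to
(not the commit's exact body):

    static int example_mac_digest(struct ah_data *ahp, struct sk_buff *skb)
    {
        struct hash_desc desc;
        int err;

        desc.tfm = ahp->tfm;    /* keyed once in ah_init_state() */
        desc.flags = 0;

        err = crypto_hash_init(&desc);
        if (err)
            return err;
        /* Feed every fragment of the skb through the hash;
         * crypto_hash_update() matches icv_update_fn_t exactly. */
        err = skb_icv_walk(skb, &desc, 0, skb->len, crypto_hash_update);
        if (err)
            return err;
        return crypto_hash_final(&desc, ahp->work_icv);
    }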


@ -1,3 +1,4 @@
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
@ -16,7 +17,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
int err;
struct iphdr *top_iph;
struct ip_esp_hdr *esph;
struct crypto_tfm *tfm;
struct crypto_blkcipher *tfm;
struct blkcipher_desc desc;
struct esp_data *esp;
struct sk_buff *trailer;
int blksize;
@ -36,7 +38,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
esp = x->data;
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
desc.tfm = tfm;
desc.flags = 0;
blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
clen = ALIGN(clen + 2, blksize);
if (esp->conf.padlen)
clen = ALIGN(clen, esp->conf.padlen);
@ -92,7 +96,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
xfrm_aevent_doreplay(x);
if (esp->conf.ivlen)
crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
do {
struct scatterlist *sg = &esp->sgbuf[0];
@ -103,26 +107,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
goto error;
}
skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
crypto_cipher_encrypt(tfm, sg, sg, clen);
err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
} while (0);
if (unlikely(err))
goto error;
if (esp->conf.ivlen) {
memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
}
if (esp->auth.icv_full_len) {
esp->auth.icv(esp, skb, (u8*)esph-skb->data,
sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
pskb_put(skb, trailer, alen);
err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
sizeof(*esph) + esp->conf.ivlen + clen);
memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
}
ip_send_check(top_iph);
err = 0;
error:
return err;
}
@ -137,8 +142,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
struct iphdr *iph;
struct ip_esp_hdr *esph;
struct esp_data *esp = x->data;
struct crypto_blkcipher *tfm = esp->conf.tfm;
struct blkcipher_desc desc = { .tfm = tfm };
struct sk_buff *trailer;
int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
int alen = esp->auth.icv_trunc_len;
int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
int nfrags;
@ -146,6 +153,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
u8 nexthdr[2];
struct scatterlist *sg;
int padlen;
int err;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
goto out;
@ -155,15 +163,16 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
/* If integrity check is required, do this. */
if (esp->auth.icv_full_len) {
u8 sum[esp->auth.icv_full_len];
u8 sum1[alen];
esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
u8 sum[alen];
if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
err = esp_mac_digest(esp, skb, 0, skb->len - alen);
if (err)
goto out;
if (skb_copy_bits(skb, skb->len - alen, sum, alen))
BUG();
if (unlikely(memcmp(sum, sum1, alen))) {
if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
x->stats.integrity_failed++;
goto out;
}
@ -178,7 +187,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
/* Get ivec. This can be wrong, check against another impls. */
if (esp->conf.ivlen)
crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
sg = &esp->sgbuf[0];
@ -188,9 +197,11 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
err = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
if (unlikely(err))
return err;
if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
BUG();
@ -254,7 +265,7 @@ out:
static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
{
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
if (x->props.mode) {
mtu = ALIGN(mtu + 2, blksize);
@ -293,11 +304,11 @@ static void esp_destroy(struct xfrm_state *x)
if (!esp)
return;
crypto_free_tfm(esp->conf.tfm);
crypto_free_blkcipher(esp->conf.tfm);
esp->conf.tfm = NULL;
kfree(esp->conf.ivec);
esp->conf.ivec = NULL;
crypto_free_tfm(esp->auth.tfm);
crypto_free_hash(esp->auth.tfm);
esp->auth.tfm = NULL;
kfree(esp->auth.work_icv);
esp->auth.work_icv = NULL;
@ -307,6 +318,7 @@ static void esp_destroy(struct xfrm_state *x)
static int esp_init_state(struct xfrm_state *x)
{
struct esp_data *esp = NULL;
struct crypto_blkcipher *tfm;
/* null auth and encryption can have zero length keys */
if (x->aalg) {
@ -322,22 +334,27 @@ static int esp_init_state(struct xfrm_state *x)
if (x->aalg) {
struct xfrm_algo_desc *aalg_desc;
struct crypto_hash *hash;
esp->auth.key = x->aalg->alg_key;
esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (esp->auth.tfm == NULL)
hash = crypto_alloc_hash(x->aalg->alg_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(hash))
goto error;
esp->auth.tfm = hash;
if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
goto error;
esp->auth.icv = esp_hmac_digest;
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_tfm_alg_digestsize(esp->auth.tfm)) {
crypto_hash_digestsize(hash)) {
NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
x->aalg->alg_name,
crypto_tfm_alg_digestsize(esp->auth.tfm),
crypto_hash_digestsize(hash),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
@ -351,13 +368,11 @@ static int esp_init_state(struct xfrm_state *x)
}
esp->conf.key = x->ealg->alg_key;
esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
if (x->props.ealgo == SADB_EALG_NULL)
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
else
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
if (esp->conf.tfm == NULL)
tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto error;
esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
esp->conf.tfm = tfm;
esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
esp->conf.padlen = 0;
if (esp->conf.ivlen) {
esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
@ -365,7 +380,7 @@ static int esp_init_state(struct xfrm_state *x)
goto error;
get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
}
if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
goto error;
x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
if (x->props.mode)
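
The cipher mode is now part of the algorithm name (e.g. "cbc(des)"),
supplied by the renamed xfrm_algo descriptors, which is also why the
Kconfig entries in this series grow a select CRYPTO_CBC line. IV
handling moves to the blkcipher helpers; illustrative sketch of the
encrypt path (sg and clen as in the hunks above):

    struct blkcipher_desc desc;
    int err;

    desc.tfm = esp->conf.tfm;    /* e.g. a "cbc(des3_ede)" instance */
    desc.flags = 0;

    /* Chain the IV across packets through the tfm, as the old
     * per-mode cipher code did implicitly. */
    crypto_blkcipher_set_iv(desc.tfm, esp->conf.ivec, esp->conf.ivlen);
    err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
    if (!err)
        crypto_blkcipher_get_iv(desc.tfm, esp->conf.ivec,
                                esp->conf.ivlen);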


@ -32,7 +32,7 @@
struct ipcomp_tfms {
struct list_head list;
struct crypto_tfm **tfms;
struct crypto_comp **tfms;
int users;
};
@ -46,7 +46,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
int err, plen, dlen;
struct ipcomp_data *ipcd = x->data;
u8 *start, *scratch;
struct crypto_tfm *tfm;
struct crypto_comp *tfm;
int cpu;
plen = skb->len;
@ -107,7 +107,7 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
struct iphdr *iph = skb->nh.iph;
struct ipcomp_data *ipcd = x->data;
u8 *start, *scratch;
struct crypto_tfm *tfm;
struct crypto_comp *tfm;
int cpu;
ihlen = iph->ihl * 4;
@ -302,7 +302,7 @@ static void **ipcomp_alloc_scratches(void)
return scratches;
}
static void ipcomp_free_tfms(struct crypto_tfm **tfms)
static void ipcomp_free_tfms(struct crypto_comp **tfms)
{
struct ipcomp_tfms *pos;
int cpu;
@ -324,28 +324,28 @@ static void ipcomp_free_tfms(struct crypto_tfm **tfms)
return;
for_each_possible_cpu(cpu) {
struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
crypto_free_tfm(tfm);
struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
crypto_free_comp(tfm);
}
free_percpu(tfms);
}
static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
{
struct ipcomp_tfms *pos;
struct crypto_tfm **tfms;
struct crypto_comp **tfms;
int cpu;
/* This can be any valid CPU ID so we don't need locking. */
cpu = raw_smp_processor_id();
list_for_each_entry(pos, &ipcomp_tfms_list, list) {
struct crypto_tfm *tfm;
struct crypto_comp *tfm;
tfms = pos->tfms;
tfm = *per_cpu_ptr(tfms, cpu);
if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
if (!strcmp(crypto_comp_name(tfm), alg_name)) {
pos->users++;
return tfms;
}
@ -359,12 +359,13 @@ static struct crypto_tfm **ipcomp_alloc_tfms(const char *alg_name)
INIT_LIST_HEAD(&pos->list);
list_add(&pos->list, &ipcomp_tfms_list);
pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
if (!tfms)
goto error;
for_each_possible_cpu(cpu) {
struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
CRYPTO_ALG_ASYNC);
if (!tfm)
goto error;
*per_cpu_ptr(tfms, cpu) = tfm;
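
IPComp keeps its per-CPU transform arrays, now typed as crypto_comp.
Note the allocation result is still tested with !tfm here, unlike the
IS_ERR() checks used for hash and blkcipher handles elsewhere in the
series. Illustrative sketch of one compress call (start, plen, scratch
and scratch_len are placeholder names):

    struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, get_cpu());
    unsigned int dlen = scratch_len;  /* in: room left, out: bytes used */
    int err;

    err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
    put_cpu();
    if (err)
        return err;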


@ -77,6 +77,7 @@ config INET6_ESP
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
select CRYPTO_CBC
select CRYPTO_SHA1
select CRYPTO_DES
---help---


@ -213,7 +213,10 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
ah->spi = x->id.spi;
ah->seq_no = htonl(++x->replay.oseq);
xfrm_aevent_doreplay(x);
ahp->icv(ahp, skb, ah->auth_data);
err = ah_mac_digest(ahp, skb, ah->auth_data);
if (err)
goto error_free_iph;
memcpy(ah->auth_data, ahp->work_icv, ahp->icv_trunc_len);
err = 0;
@ -251,6 +254,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
u16 hdr_len;
u16 ah_hlen;
int nexthdr;
int err = -EINVAL;
if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
goto out;
@ -292,8 +296,11 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
memset(ah->auth_data, 0, ahp->icv_trunc_len);
skb_push(skb, hdr_len);
ahp->icv(ahp, skb, ah->auth_data);
if (memcmp(ah->auth_data, auth_data, ahp->icv_trunc_len)) {
err = ah_mac_digest(ahp, skb, ah->auth_data);
if (err)
goto free_out;
err = -EINVAL;
if (memcmp(ahp->work_icv, auth_data, ahp->icv_trunc_len)) {
LIMIT_NETDEBUG(KERN_WARNING "ipsec ah authentication error\n");
x->stats.integrity_failed++;
goto free_out;
@ -310,7 +317,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
free_out:
kfree(tmp_hdr);
out:
return -EINVAL;
return err;
}
static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@ -338,6 +345,7 @@ static int ah6_init_state(struct xfrm_state *x)
{
struct ah_data *ahp = NULL;
struct xfrm_algo_desc *aalg_desc;
struct crypto_hash *tfm;
if (!x->aalg)
goto error;
@ -355,24 +363,27 @@ static int ah6_init_state(struct xfrm_state *x)
ahp->key = x->aalg->alg_key;
ahp->key_len = (x->aalg->alg_key_len+7)/8;
ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (!ahp->tfm)
tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto error;
ahp->tfm = tfm;
if (crypto_hash_setkey(tfm, ahp->key, ahp->key_len))
goto error;
ahp->icv = ah_hmac_digest;
/*
* Lookup the algorithm description maintained by xfrm_algo,
* verify crypto transform properties, and store information
* we need for AH processing. This lookup cannot fail here
* after a successful crypto_alloc_tfm().
* after a successful crypto_alloc_hash().
*/
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_tfm_alg_digestsize(ahp->tfm)) {
crypto_hash_digestsize(tfm)) {
printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm),
x->aalg->alg_name, crypto_hash_digestsize(tfm),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
@ -396,7 +407,7 @@ static int ah6_init_state(struct xfrm_state *x)
error:
if (ahp) {
kfree(ahp->work_icv);
crypto_free_tfm(ahp->tfm);
crypto_free_hash(ahp->tfm);
kfree(ahp);
}
return -EINVAL;
@ -411,7 +422,7 @@ static void ah6_destroy(struct xfrm_state *x)
kfree(ahp->work_icv);
ahp->work_icv = NULL;
crypto_free_tfm(ahp->tfm);
crypto_free_hash(ahp->tfm);
ahp->tfm = NULL;
kfree(ahp);
}


@ -24,6 +24,7 @@
* This file is derived from net/ipv4/esp.c
*/
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
@ -44,7 +45,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
int hdr_len;
struct ipv6hdr *top_iph;
struct ipv6_esp_hdr *esph;
struct crypto_tfm *tfm;
struct crypto_blkcipher *tfm;
struct blkcipher_desc desc;
struct esp_data *esp;
struct sk_buff *trailer;
int blksize;
@ -67,7 +69,9 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
alen = esp->auth.icv_trunc_len;
tfm = esp->conf.tfm;
blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
desc.tfm = tfm;
desc.flags = 0;
blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
clen = ALIGN(clen + 2, blksize);
if (esp->conf.padlen)
clen = ALIGN(clen, esp->conf.padlen);
@ -96,7 +100,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
xfrm_aevent_doreplay(x);
if (esp->conf.ivlen)
crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
crypto_blkcipher_set_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
do {
struct scatterlist *sg = &esp->sgbuf[0];
@ -107,24 +111,25 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
goto error;
}
skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
crypto_cipher_encrypt(tfm, sg, sg, clen);
err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
} while (0);
if (unlikely(err))
goto error;
if (esp->conf.ivlen) {
memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
memcpy(esph->enc_data, esp->conf.ivec, esp->conf.ivlen);
crypto_blkcipher_get_iv(tfm, esp->conf.ivec, esp->conf.ivlen);
}
if (esp->auth.icv_full_len) {
esp->auth.icv(esp, skb, (u8*)esph-skb->data,
sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
pskb_put(skb, trailer, alen);
err = esp_mac_digest(esp, skb, (u8 *)esph - skb->data,
sizeof(*esph) + esp->conf.ivlen + clen);
memcpy(pskb_put(skb, trailer, alen), esp->auth.work_icv, alen);
}
err = 0;
error:
return err;
}
@ -134,8 +139,10 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
struct ipv6hdr *iph;
struct ipv6_esp_hdr *esph;
struct esp_data *esp = x->data;
struct crypto_blkcipher *tfm = esp->conf.tfm;
struct blkcipher_desc desc = { .tfm = tfm };
struct sk_buff *trailer;
int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
int blksize = ALIGN(crypto_blkcipher_blocksize(tfm), 4);
int alen = esp->auth.icv_trunc_len;
int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
@ -155,15 +162,16 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
/* If integrity check is required, do this. */
if (esp->auth.icv_full_len) {
u8 sum[esp->auth.icv_full_len];
u8 sum1[alen];
u8 sum[alen];
esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
ret = esp_mac_digest(esp, skb, 0, skb->len - alen);
if (ret)
goto out;
if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
if (skb_copy_bits(skb, skb->len - alen, sum, alen))
BUG();
if (unlikely(memcmp(sum, sum1, alen))) {
if (unlikely(memcmp(esp->auth.work_icv, sum, alen))) {
x->stats.integrity_failed++;
ret = -EINVAL;
goto out;
@ -182,7 +190,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
/* Get ivec. This can be wrong, check against another impls. */
if (esp->conf.ivlen)
crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
crypto_blkcipher_set_iv(tfm, esph->enc_data, esp->conf.ivlen);
{
u8 nexthdr[2];
@ -197,9 +205,11 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
}
}
skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
ret = crypto_blkcipher_decrypt(&desc, sg, sg, elen);
if (unlikely(sg != &esp->sgbuf[0]))
kfree(sg);
if (unlikely(ret))
goto out;
if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
BUG();
@ -225,7 +235,7 @@ out:
static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
{
struct esp_data *esp = x->data;
u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
if (x->props.mode) {
mtu = ALIGN(mtu + 2, blksize);
@ -266,11 +276,11 @@ static void esp6_destroy(struct xfrm_state *x)
if (!esp)
return;
crypto_free_tfm(esp->conf.tfm);
crypto_free_blkcipher(esp->conf.tfm);
esp->conf.tfm = NULL;
kfree(esp->conf.ivec);
esp->conf.ivec = NULL;
crypto_free_tfm(esp->auth.tfm);
crypto_free_hash(esp->auth.tfm);
esp->auth.tfm = NULL;
kfree(esp->auth.work_icv);
esp->auth.work_icv = NULL;
@ -280,6 +290,7 @@ static void esp6_destroy(struct xfrm_state *x)
static int esp6_init_state(struct xfrm_state *x)
{
struct esp_data *esp = NULL;
struct crypto_blkcipher *tfm;
/* null auth and encryption can have zero length keys */
if (x->aalg) {
@ -298,24 +309,29 @@ static int esp6_init_state(struct xfrm_state *x)
if (x->aalg) {
struct xfrm_algo_desc *aalg_desc;
struct crypto_hash *hash;
esp->auth.key = x->aalg->alg_key;
esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
if (esp->auth.tfm == NULL)
hash = crypto_alloc_hash(x->aalg->alg_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(hash))
goto error;
esp->auth.tfm = hash;
if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
goto error;
esp->auth.icv = esp_hmac_digest;
aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
BUG_ON(!aalg_desc);
if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
crypto_tfm_alg_digestsize(esp->auth.tfm)) {
printk(KERN_INFO "ESP: %s digestsize %u != %hu\n",
x->aalg->alg_name,
crypto_tfm_alg_digestsize(esp->auth.tfm),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
crypto_hash_digestsize(hash)) {
NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
x->aalg->alg_name,
crypto_hash_digestsize(hash),
aalg_desc->uinfo.auth.icv_fullbits/8);
goto error;
}
esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
@ -327,13 +343,11 @@ static int esp6_init_state(struct xfrm_state *x)
}
esp->conf.key = x->ealg->alg_key;
esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
if (x->props.ealgo == SADB_EALG_NULL)
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
else
esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
if (esp->conf.tfm == NULL)
tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto error;
esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
esp->conf.tfm = tfm;
esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
esp->conf.padlen = 0;
if (esp->conf.ivlen) {
esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
@ -341,7 +355,7 @@ static int esp6_init_state(struct xfrm_state *x)
goto error;
get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
}
if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
goto error;
x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
if (x->props.mode)


@ -53,7 +53,7 @@
struct ipcomp6_tfms {
struct list_head list;
struct crypto_tfm **tfms;
struct crypto_comp **tfms;
int users;
};
@ -70,7 +70,7 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb)
int plen, dlen;
struct ipcomp_data *ipcd = x->data;
u8 *start, *scratch;
struct crypto_tfm *tfm;
struct crypto_comp *tfm;
int cpu;
if (skb_linearize_cow(skb))
@ -129,7 +129,7 @@ static int ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct ipcomp_data *ipcd = x->data;
int plen, dlen;
u8 *start, *scratch;
struct crypto_tfm *tfm;
struct crypto_comp *tfm;
int cpu;
hdr_len = skb->h.raw - skb->data;
@ -301,7 +301,7 @@ static void **ipcomp6_alloc_scratches(void)
return scratches;
}
static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
static void ipcomp6_free_tfms(struct crypto_comp **tfms)
{
struct ipcomp6_tfms *pos;
int cpu;
@ -323,28 +323,28 @@ static void ipcomp6_free_tfms(struct crypto_tfm **tfms)
return;
for_each_possible_cpu(cpu) {
struct crypto_tfm *tfm = *per_cpu_ptr(tfms, cpu);
crypto_free_tfm(tfm);
struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
crypto_free_comp(tfm);
}
free_percpu(tfms);
}
static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
{
struct ipcomp6_tfms *pos;
struct crypto_tfm **tfms;
struct crypto_comp **tfms;
int cpu;
/* This can be any valid CPU ID so we don't need locking. */
cpu = raw_smp_processor_id();
list_for_each_entry(pos, &ipcomp6_tfms_list, list) {
struct crypto_tfm *tfm;
struct crypto_comp *tfm;
tfms = pos->tfms;
tfm = *per_cpu_ptr(tfms, cpu);
if (!strcmp(crypto_tfm_alg_name(tfm), alg_name)) {
if (!strcmp(crypto_comp_name(tfm), alg_name)) {
pos->users++;
return tfms;
}
@ -358,12 +358,13 @@ static struct crypto_tfm **ipcomp6_alloc_tfms(const char *alg_name)
INIT_LIST_HEAD(&pos->list);
list_add(&pos->list, &ipcomp6_tfms_list);
pos->tfms = tfms = alloc_percpu(struct crypto_tfm *);
pos->tfms = tfms = alloc_percpu(struct crypto_comp *);
if (!tfms)
goto error;
for_each_possible_cpu(cpu) {
struct crypto_tfm *tfm = crypto_alloc_tfm(alg_name, 0);
struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
CRYPTO_ALG_ASYNC);
if (!tfm)
goto error;
*per_cpu_ptr(tfms, cpu) = tfm;


@ -173,7 +173,7 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
/* Free up the HMAC transform. */
sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac);
crypto_free_hash(sctp_sk(ep->base.sk)->hmac);
/* Cleanup. */
sctp_inq_free(&ep->base.inqueue);


@ -1282,10 +1282,8 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
retval = kmalloc(*cookie_len, GFP_ATOMIC);
if (!retval) {
*cookie_len = 0;
if (!retval)
goto nodata;
}
/* Clear this memory since we are sending this data structure
* out on the network.
@ -1321,19 +1319,29 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
if (sctp_sk(ep->base.sk)->hmac) {
struct hash_desc desc;
/* Sign the message. */
sg.page = virt_to_page(&cookie->c);
sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE;
sg.length = bodysize;
keylen = SCTP_SECRET_SIZE;
key = (char *)ep->secret_key[ep->current_key];
desc.tfm = sctp_sk(ep->base.sk)->hmac;
desc.flags = 0;
sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen,
&sg, 1, cookie->signature);
if (crypto_hash_setkey(desc.tfm, key, keylen) ||
crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
goto free_cookie;
}
nodata:
return retval;
free_cookie:
kfree(retval);
nodata:
*cookie_len = 0;
return NULL;
}
/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */
@ -1354,6 +1362,7 @@ struct sctp_association *sctp_unpack_cookie(
sctp_scope_t scope;
struct sk_buff *skb = chunk->skb;
struct timeval tv;
struct hash_desc desc;
/* Header size is static data prior to the actual cookie, including
* any padding.
@ -1389,17 +1398,25 @@ struct sctp_association *sctp_unpack_cookie(
sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE;
sg.length = bodysize;
key = (char *)ep->secret_key[ep->current_key];
desc.tfm = sctp_sk(ep->base.sk)->hmac;
desc.flags = 0;
memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, &sg,
1, digest);
if (crypto_hash_setkey(desc.tfm, key, keylen) ||
crypto_hash_digest(&desc, &sg, bodysize, digest)) {
*error = -SCTP_IERROR_NOMEM;
goto fail;
}
if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
/* Try the previous key. */
key = (char *)ep->secret_key[ep->last_key];
memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen,
&sg, 1, digest);
if (crypto_hash_setkey(desc.tfm, key, keylen) ||
crypto_hash_digest(&desc, &sg, bodysize, digest)) {
*error = -SCTP_IERROR_NOMEM;
goto fail;
}
if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
/* Yikes! Still bad signature! */
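
Cookie signing and verification now run through a hash_desc, re-keying
with the relevant secret and checking every step: an HMAC failure frees
the cookie on the signing side or surfaces -SCTP_IERROR_NOMEM on the
verify side. Condensed sketch of the signing step (illustrative, using
the names from the hunk above):

    struct hash_desc desc;
    struct scatterlist sg;

    sg_init_one(&sg, &cookie->c, bodysize);
    desc.tfm = sctp_sk(ep->base.sk)->hmac;
    desc.flags = 0;

    /* Re-key with the current secret, then MAC the cookie body;
     * either call failing abandons the cookie. */
    if (crypto_hash_setkey(desc.tfm, key, keylen) ||
        crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
        goto free_cookie;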


@ -4898,7 +4898,7 @@ SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog)
int sctp_inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct crypto_tfm *tfm=NULL;
struct crypto_hash *tfm = NULL;
int err = -EINVAL;
if (unlikely(backlog < 0))
@ -4911,7 +4911,7 @@ int sctp_inet_listen(struct socket *sock, int backlog)
/* Allocate HMAC for generating cookie. */
if (sctp_hmac_alg) {
tfm = sctp_crypto_alloc_tfm(sctp_hmac_alg, 0);
tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
if (!tfm) {
err = -ENOSYS;
goto out;
@ -4937,7 +4937,7 @@ out:
sctp_release_sock(sk);
return err;
cleanup:
sctp_crypto_free_tfm(tfm);
crypto_free_hash(tfm);
goto out;
}
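
One caution on the hunk above: crypto_alloc_hash() reports failure as
ERR_PTR(), never NULL, so the retained if (!tfm) test cannot catch a
failed allocation. A defensive form of the call site (sketch, not part
of this diff):

    tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm)) {
        err = -ENOSYS;
        tfm = NULL;    /* don't leave an ERR_PTR behind */
        goto out;
    }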


@ -34,6 +34,7 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
@ -49,7 +50,7 @@
u32
krb5_encrypt(
struct crypto_tfm *tfm,
struct crypto_blkcipher *tfm,
void * iv,
void * in,
void * out,
@ -58,26 +59,27 @@ krb5_encrypt(
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[16] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
dprintk("RPC: krb5_encrypt: input data:\n");
print_hexl((u32 *)in, length, 0);
if (length % crypto_tfm_alg_blocksize(tfm) != 0)
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
if (crypto_tfm_alg_ivsize(tfm) > 16) {
if (crypto_blkcipher_ivsize(tfm) > 16) {
dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
crypto_tfm_alg_ivsize(tfm));
crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
memcpy(local_iv, iv, crypto_tfm_alg_ivsize(tfm));
memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
sg_set_buf(sg, out, length);
ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv);
ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
dprintk("RPC: krb5_encrypt: output data:\n");
print_hexl((u32 *)out, length, 0);
@ -90,7 +92,7 @@ EXPORT_SYMBOL(krb5_encrypt);
u32
krb5_decrypt(
struct crypto_tfm *tfm,
struct crypto_blkcipher *tfm,
void * iv,
void * in,
void * out,
@ -99,25 +101,26 @@ krb5_decrypt(
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[16] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
dprintk("RPC: krb5_decrypt: input data:\n");
print_hexl((u32 *)in, length, 0);
if (length % crypto_tfm_alg_blocksize(tfm) != 0)
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
if (crypto_tfm_alg_ivsize(tfm) > 16) {
if (crypto_blkcipher_ivsize(tfm) > 16) {
dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
crypto_tfm_alg_ivsize(tfm));
crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
memcpy(local_iv,iv, crypto_tfm_alg_ivsize(tfm));
memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
sg_set_buf(sg, out, length);
ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv);
ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
dprintk("RPC: krb5_decrypt: output_data:\n");
print_hexl((u32 *)out, length, 0);
@ -197,11 +200,9 @@ out:
static int
checksummer(struct scatterlist *sg, void *data)
{
struct crypto_tfm *tfm = (struct crypto_tfm *)data;
struct hash_desc *desc = data;
crypto_digest_update(tfm, sg, 1);
return 0;
return crypto_hash_update(desc, sg, sg->length);
}
/* checksum the plaintext data and hdrlen bytes of the token header */
@ -210,8 +211,9 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
int body_offset, struct xdr_netobj *cksum)
{
char *cksumname;
struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */
struct hash_desc desc; /* XXX add to ctx? */
struct scatterlist sg[1];
int err;
switch (cksumtype) {
case CKSUMTYPE_RSA_MD5:
@ -222,25 +224,35 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
" unsupported checksum %d", cksumtype);
return GSS_S_FAILURE;
}
if (!(tfm = crypto_alloc_tfm(cksumname, CRYPTO_TFM_REQ_MAY_SLEEP)))
desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm))
return GSS_S_FAILURE;
cksum->len = crypto_tfm_alg_digestsize(tfm);
cksum->len = crypto_hash_digestsize(desc.tfm);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
crypto_digest_init(tfm);
err = crypto_hash_init(&desc);
if (err)
goto out;
sg_set_buf(sg, header, hdrlen);
crypto_digest_update(tfm, sg, 1);
process_xdr_buf(body, body_offset, body->len - body_offset,
checksummer, tfm);
crypto_digest_final(tfm, cksum->data);
crypto_free_tfm(tfm);
return 0;
err = crypto_hash_update(&desc, sg, hdrlen);
if (err)
goto out;
err = process_xdr_buf(body, body_offset, body->len - body_offset,
checksummer, &desc);
if (err)
goto out;
err = crypto_hash_final(&desc, cksum->data);
out:
crypto_free_hash(desc.tfm);
return err ? GSS_S_FAILURE : 0;
}
EXPORT_SYMBOL(make_checksum);
struct encryptor_desc {
u8 iv[8]; /* XXX hard-coded blocksize */
struct crypto_tfm *tfm;
struct blkcipher_desc desc;
int pos;
struct xdr_buf *outbuf;
struct page **pages;
@ -285,8 +297,8 @@ encryptor(struct scatterlist *sg, void *data)
if (thislen == 0)
return 0;
ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags,
thislen, desc->iv);
ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
desc->infrags, thislen);
if (ret)
return ret;
if (fraglen) {
@ -305,16 +317,18 @@ encryptor(struct scatterlist *sg, void *data)
}
int
gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset,
struct page **pages)
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
int offset, struct page **pages)
{
int ret;
struct encryptor_desc desc;
BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
memset(desc.iv, 0, sizeof(desc.iv));
desc.tfm = tfm;
desc.desc.tfm = tfm;
desc.desc.info = desc.iv;
desc.desc.flags = 0;
desc.pos = offset;
desc.outbuf = buf;
desc.pages = pages;
@ -329,7 +343,7 @@ EXPORT_SYMBOL(gss_encrypt_xdr_buf);
struct decryptor_desc {
u8 iv[8]; /* XXX hard-coded blocksize */
struct crypto_tfm *tfm;
struct blkcipher_desc desc;
struct scatterlist frags[4];
int fragno;
int fraglen;
@ -355,8 +369,8 @@ decryptor(struct scatterlist *sg, void *data)
if (thislen == 0)
return 0;
ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags,
thislen, desc->iv);
ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
desc->frags, thislen);
if (ret)
return ret;
if (fraglen) {
@ -373,15 +387,18 @@ decryptor(struct scatterlist *sg, void *data)
}
int
gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset)
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
int offset)
{
struct decryptor_desc desc;
/* XXXJBF: */
BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
memset(desc.iv, 0, sizeof(desc.iv));
desc.tfm = tfm;
desc.desc.tfm = tfm;
desc.desc.info = desc.iv;
desc.desc.flags = 0;
desc.fragno = 0;
desc.fraglen = 0;
return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);

View File

@ -34,6 +34,7 @@
*
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@ -78,10 +79,10 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
}
static inline const void *
get_key(const void *p, const void *end, struct crypto_tfm **res)
get_key(const void *p, const void *end, struct crypto_blkcipher **res)
{
struct xdr_netobj key;
int alg, alg_mode;
int alg;
char *alg_name;
p = simple_get_bytes(p, end, &alg, sizeof(alg));
@ -93,18 +94,19 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
switch (alg) {
case ENCTYPE_DES_CBC_RAW:
alg_name = "des";
alg_mode = CRYPTO_TFM_MODE_CBC;
alg_name = "cbc(des)";
break;
default:
printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
goto out_err_free_key;
}
if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(*res)) {
printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
*res = NULL;
goto out_err_free_key;
}
if (crypto_cipher_setkey(*res, key.data, key.len)) {
if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
goto out_err_free_tfm;
}
@ -113,7 +115,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res)
return p;
out_err_free_tfm:
crypto_free_tfm(*res);
crypto_free_blkcipher(*res);
out_err_free_key:
kfree(key.data);
p = ERR_PTR(-EINVAL);
@ -172,9 +174,9 @@ gss_import_sec_context_kerberos(const void *p,
return 0;
out_err_free_key2:
crypto_free_tfm(ctx->seq);
crypto_free_blkcipher(ctx->seq);
out_err_free_key1:
crypto_free_tfm(ctx->enc);
crypto_free_blkcipher(ctx->enc);
out_err_free_mech:
kfree(ctx->mech_used.data);
out_err_free_ctx:
@ -187,8 +189,8 @@ static void
gss_delete_sec_context_kerberos(void *internal_ctx) {
struct krb5_ctx *kctx = internal_ctx;
crypto_free_tfm(kctx->seq);
crypto_free_tfm(kctx->enc);
crypto_free_blkcipher(kctx->seq);
crypto_free_blkcipher(kctx->enc);
kfree(kctx->mech_used.data);
kfree(kctx);
}


@ -41,7 +41,7 @@
#endif
s32
krb5_make_seq_num(struct crypto_tfm *key,
krb5_make_seq_num(struct crypto_blkcipher *key,
int direction,
s32 seqnum,
unsigned char *cksum, unsigned char *buf)
@ -62,7 +62,7 @@ krb5_make_seq_num(struct crypto_tfm *key,
}
s32
krb5_get_seq_num(struct crypto_tfm *key,
krb5_get_seq_num(struct crypto_blkcipher *key,
unsigned char *cksum,
unsigned char *buf,
int *direction, s32 * seqnum)


@ -149,7 +149,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
goto out_err;
}
blocksize = crypto_tfm_alg_blocksize(kctx->enc);
blocksize = crypto_blkcipher_blocksize(kctx->enc);
gss_krb5_add_padding(buf, offset, blocksize);
BUG_ON((buf->len - offset) % blocksize);
plainlen = blocksize + buf->len - offset;
@ -346,7 +346,7 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
/* Copy the data back to the right position. XXX: Would probably be
* better to copy and encrypt at the same time. */
blocksize = crypto_tfm_alg_blocksize(kctx->enc);
blocksize = crypto_blkcipher_blocksize(kctx->enc);
data_start = ptr + 22 + blocksize;
orig_start = buf->head[0].iov_base + offset;
data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;


@ -34,6 +34,7 @@
*
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@ -83,10 +84,11 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
}
static inline const void *
get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
get_key(const void *p, const void *end, struct crypto_blkcipher **res,
int *resalg)
{
struct xdr_netobj key = { 0 };
int alg_mode,setkey = 0;
int setkey = 0;
char *alg_name;
p = simple_get_bytes(p, end, resalg, sizeof(*resalg));
@ -98,14 +100,12 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
switch (*resalg) {
case NID_des_cbc:
alg_name = "des";
alg_mode = CRYPTO_TFM_MODE_CBC;
alg_name = "cbc(des)";
setkey = 1;
break;
case NID_cast5_cbc:
/* XXXX here in name only, not used */
alg_name = "cast5";
alg_mode = CRYPTO_TFM_MODE_CBC;
alg_name = "cbc(cast5)";
setkey = 0; /* XXX will need to set to 1 */
break;
case NID_md5:
@ -113,19 +113,20 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
}
alg_name = "md5";
alg_mode = 0;
setkey = 0;
break;
default:
dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
goto out_err_free_key;
}
if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(*res)) {
printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
*res = NULL;
goto out_err_free_key;
}
if (setkey) {
if (crypto_cipher_setkey(*res, key.data, key.len)) {
if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
goto out_err_free_tfm;
}
@ -136,7 +137,7 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
return p;
out_err_free_tfm:
crypto_free_tfm(*res);
crypto_free_blkcipher(*res);
out_err_free_key:
if(key.len > 0)
kfree(key.data);
@ -204,9 +205,9 @@ gss_import_sec_context_spkm3(const void *p, size_t len,
return 0;
out_err_free_key2:
crypto_free_tfm(ctx->derived_integ_key);
crypto_free_blkcipher(ctx->derived_integ_key);
out_err_free_key1:
crypto_free_tfm(ctx->derived_conf_key);
crypto_free_blkcipher(ctx->derived_conf_key);
out_err_free_s_key:
kfree(ctx->share_key.data);
out_err_free_mech:
@ -223,8 +224,8 @@ static void
gss_delete_sec_context_spkm3(void *internal_ctx) {
struct spkm3_ctx *sctx = internal_ctx;
crypto_free_tfm(sctx->derived_integ_key);
crypto_free_tfm(sctx->derived_conf_key);
crypto_free_blkcipher(sctx->derived_integ_key);
crypto_free_blkcipher(sctx->derived_conf_key);
kfree(sctx->share_key.data);
kfree(sctx->mech_used.data);
kfree(sctx);


@ -30,7 +30,8 @@
*/
static struct xfrm_algo_desc aalg_list[] = {
{
.name = "digest_null",
.name = "hmac(digest_null)",
.compat = "digest_null",
.uinfo = {
.auth = {
@ -47,7 +48,8 @@ static struct xfrm_algo_desc aalg_list[] = {
}
},
{
.name = "md5",
.name = "hmac(md5)",
.compat = "md5",
.uinfo = {
.auth = {
@ -64,7 +66,8 @@ static struct xfrm_algo_desc aalg_list[] = {
}
},
{
.name = "sha1",
.name = "hmac(sha1)",
.compat = "sha1",
.uinfo = {
.auth = {
@ -81,7 +84,8 @@ static struct xfrm_algo_desc aalg_list[] = {
}
},
{
.name = "sha256",
.name = "hmac(sha256)",
.compat = "sha256",
.uinfo = {
.auth = {
@ -98,7 +102,8 @@ static struct xfrm_algo_desc aalg_list[] = {
}
},
{
.name = "ripemd160",
.name = "hmac(ripemd160)",
.compat = "ripemd160",
.uinfo = {
.auth = {
@ -118,7 +123,8 @@ static struct xfrm_algo_desc aalg_list[] = {
static struct xfrm_algo_desc ealg_list[] = {
{
.name = "cipher_null",
.name = "ecb(cipher_null)",
.compat = "cipher_null",
.uinfo = {
.encr = {
@ -135,7 +141,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
.name = "des",
.name = "cbc(des)",
.compat = "des",
.uinfo = {
.encr = {
@ -152,7 +159,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
.name = "des3_ede",
.name = "cbc(des3_ede)",
.compat = "des3_ede",
.uinfo = {
.encr = {
@ -169,7 +177,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
.name = "cast128",
.name = "cbc(cast128)",
.compat = "cast128",
.uinfo = {
.encr = {
@ -186,7 +195,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
.name = "blowfish",
.name = "cbc(blowfish)",
.compat = "blowfish",
.uinfo = {
.encr = {
@ -203,7 +213,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
.name = "aes",
.name = "cbc(aes)",
.compat = "aes",
.uinfo = {
.encr = {
@ -220,7 +231,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
.name = "serpent",
.name = "cbc(serpent)",
.compat = "serpent",
.uinfo = {
.encr = {
@ -237,7 +249,8 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
.name = "twofish",
.name = "cbc(twofish)",
.compat = "twofish",
.uinfo = {
.encr = {
@ -350,8 +363,8 @@ struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
int entries, char *name,
int probe)
int entries, u32 type, u32 mask,
char *name, int probe)
{
int i, status;
@ -359,7 +372,8 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
return NULL;
for (i = 0; i < entries; i++) {
if (strcmp(name, list[i].name))
if (strcmp(name, list[i].name) &&
(!list[i].compat || strcmp(name, list[i].compat)))
continue;
if (list[i].available)
@ -368,7 +382,7 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
if (!probe)
break;
status = crypto_alg_available(name, 0);
status = crypto_has_alg(name, type, mask | CRYPTO_ALG_ASYNC);
if (!status)
break;
@ -380,19 +394,25 @@ static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
return xfrm_get_byname(aalg_list, aalg_entries(), name, probe);
return xfrm_get_byname(aalg_list, aalg_entries(),
CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);
struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
return xfrm_get_byname(ealg_list, ealg_entries(), name, probe);
return xfrm_get_byname(ealg_list, ealg_entries(),
CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);
struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
return xfrm_get_byname(calg_list, calg_entries(), name, probe);
return xfrm_get_byname(calg_list, calg_entries(),
CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
@ -427,19 +447,22 @@ void xfrm_probe_algs(void)
BUG_ON(in_softirq());
for (i = 0; i < aalg_entries(); i++) {
status = crypto_alg_available(aalg_list[i].name, 0);
status = crypto_has_hash(aalg_list[i].name, 0,
CRYPTO_ALG_ASYNC);
if (aalg_list[i].available != status)
aalg_list[i].available = status;
}
for (i = 0; i < ealg_entries(); i++) {
status = crypto_alg_available(ealg_list[i].name, 0);
status = crypto_has_blkcipher(ealg_list[i].name, 0,
CRYPTO_ALG_ASYNC);
if (ealg_list[i].available != status)
ealg_list[i].available = status;
}
for (i = 0; i < calg_entries(); i++) {
status = crypto_alg_available(calg_list[i].name, 0);
status = crypto_has_comp(calg_list[i].name, 0,
CRYPTO_ALG_ASYNC);
if (calg_list[i].available != status)
calg_list[i].available = status;
}
@ -471,11 +494,12 @@ EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
/* Move to common area: it is shared with AH. */
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
int offset, int len, icv_update_fn_t icv_update)
int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
int offset, int len, icv_update_fn_t icv_update)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
int err;
struct scatterlist sg;
/* Checksum header. */
@ -487,10 +511,12 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
sg.length = copy;
icv_update(tfm, &sg, 1);
err = icv_update(desc, &sg, copy);
if (unlikely(err))
return err;
if ((len -= copy) == 0)
return;
return 0;
offset += copy;
}
@ -510,10 +536,12 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
sg.offset = frag->page_offset + offset-start;
sg.length = copy;
icv_update(tfm, &sg, 1);
err = icv_update(desc, &sg, copy);
if (unlikely(err))
return err;
if (!(len -= copy))
return;
return 0;
offset += copy;
}
start = end;
@ -531,15 +559,19 @@ void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_icv_walk(list, tfm, offset-start, copy, icv_update);
err = skb_icv_walk(list, desc, offset-start,
copy, icv_update);
if (unlikely(err))
return err;
if ((len -= copy) == 0)
return;
return 0;
offset += copy;
}
start = end;
}
}
BUG_ON(len);
return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
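
The descriptor tables now carry template-qualified canonical names with
a .compat alias, so existing userspace strings like "sha1" still
resolve, and availability probing asks for the specific transform type
while masking out async-only implementations. Sketch of the new probe
calls (algorithm names chosen for illustration):

    /* Each returns non-zero when a synchronous implementation
     * of the named transform can be found or loaded. */
    int have_auth = crypto_has_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
    int have_enc = crypto_has_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
    int have_comp = crypto_has_comp("deflate", 0, CRYPTO_ALG_ASYNC);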


@ -10,6 +10,7 @@
*
*/
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@ -212,6 +213,7 @@ static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
return -ENOMEM;
memcpy(p, ualg, len);
strcpy(p->alg_name, algo->name);
*algpp = p;
return 0;
}


@ -16,6 +16,7 @@
* (at your option) any later version.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@ -197,26 +198,27 @@ static unsigned char hashedPassword[SHA1_DIGEST_SIZE];
static int
plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len)
{
struct crypto_tfm *tfm;
struct hash_desc desc;
struct scatterlist sg;
int err;
if (len > PAGE_SIZE) {
seclvl_printk(0, KERN_ERR, "Plaintext password too large (%d "
"characters). Largest possible is %lu "
"bytes.\n", len, PAGE_SIZE);
return -EINVAL;
}
tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP);
if (tfm == NULL) {
desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm)) {
seclvl_printk(0, KERN_ERR,
"Failed to load transform for SHA1\n");
return -EINVAL;
}
sg_init_one(&sg, (u8 *)plaintext, len);
crypto_digest_init(tfm);
crypto_digest_update(tfm, &sg, 1);
crypto_digest_final(tfm, hash);
crypto_free_tfm(tfm);
return 0;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_digest(&desc, &sg, len, hash);
crypto_free_hash(desc.tfm);
return err;
}
/**