[BLOCK] dm-crypt: benbi IV, big endian narrow block count for LRW-32-AES

LRW-32-AES needs a certain IV. This IV should be provided by dm-crypt.
The block cipher mode could, in principle, generate the correct IV from
the plain IV, but I think it is cleaner to supply the right IV
directly.

The sector -> narrow block calculation uses a shift for performance reasons.
This shift is computed in .ctr and stored in cc->iv_gen_private (as a void *).
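
As a worked illustration (a standalone userspace sketch, not part of the patch;
it only mirrors the arithmetic of crypt_iv_benbi_ctr() and the count used by
crypt_iv_benbi_gen()): for AES the block size is 16 bytes, log2(16) = 4, so the
shift is 9 - 4 = 5 and each 512-byte sector covers 2^5 = 32 narrow blocks.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int bs = 16;	/* cipher block size in bytes (AES) */
	int log = 0;

	while ((1u << log) < bs)
		log++;		/* userspace stand-in for long_log2() */

	int shift = 9 - log;	/* a sector is 1 << 9 = 512 bytes */
	uint64_t sector = 7;

	/* benbi count of the first narrow block in this sector, 1-based */
	uint64_t count = (sector << shift) + 1;

	printf("shift=%d count=%llu\n", shift, (unsigned long long)count);
	return 0;	/* prints: shift=5 count=225 */
}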

Signed-off-by: Rik Snel <rsnel@cube.dyndns.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 48527fa7cf
parent 9fe757b0cf
Author:    Rik Snel <rsnel@cube.dyndns.org>
Date:      2006-09-03 08:56:39 +10:00
Committer: David S. Miller


@@ -20,6 +20,7 @@
 #include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #include "dm.h"
@@ -113,6 +114,9 @@ static kmem_cache_t *_crypt_io_pool;
  * encrypted with the bulk cipher using a salt as key. The salt
  * should be derived from the bulk cipher's key via hashing.
  *
+ * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
+ *        (needed for LRW-32-AES and possibly other narrow block modes)
+ *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
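
Concretely: with AES (16-byte narrow blocks) and 512-byte sectors, sector 3
starts at narrow block 3 * 32 + 1 = 97, so for a 16-byte IV the generator
below emits eight zero bytes followed by the 64-bit big-endian value 97
(0x0000000000000061).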
@@ -209,6 +213,44 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 	return 0;
 }
 
+static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+			      const char *opts)
+{
+	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
+	int log = long_log2(bs);
+
+	/* we need to calculate how far we must shift the sector count
+	 * to get the cipher block count, we use this shift in _gen */
+	if (1 << log != bs) {
+		ti->error = "cypher blocksize is not a power of 2";
+		return -EINVAL;
+	}
+
+	if (log > 9) {
+		ti->error = "cypher blocksize is > 512";
+		return -EINVAL;
+	}
+
+	cc->iv_gen_private = (void *)(9 - log);
+
+	return 0;
+}
+
+static void crypt_iv_benbi_dtr(struct crypt_config *cc)
+{
+	cc->iv_gen_private = NULL;
+}
+
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
+	put_unaligned(cpu_to_be64(((u64)sector << (u32)cc->iv_gen_private) + 1),
+		      (__be64 *)(iv + cc->iv_size - sizeof(u64)));
+
+	return 0;
+}
+
 static struct crypt_iv_operations crypt_iv_plain_ops = {
 	.generator = crypt_iv_plain_gen
 };
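
The same generation logic can be exercised outside the kernel. A minimal
userspace sketch (assumptions: 16-byte IVs and 512-byte sectors; benbi_iv()
is my stand-in for crypt_iv_benbi_gen(), with a portable byte-by-byte store
replacing cpu_to_be64()/put_unaligned()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void benbi_iv(uint8_t *iv, size_t iv_size, uint64_t sector, int shift)
{
	uint64_t count = (sector << shift) + 1;	/* 1-based narrow block count */
	size_t i;

	memset(iv, 0, iv_size - sizeof(uint64_t));	/* leading bytes are zero */
	for (i = 0; i < sizeof(uint64_t); i++)	/* big-endian store into the tail */
		iv[iv_size - 1 - i] = (uint8_t)(count >> (8 * i));
}

int main(void)
{
	uint8_t iv[16];
	size_t i;

	benbi_iv(iv, sizeof(iv), 3, 5);	/* sector 3, AES: shift 5, count 97 */
	for (i = 0; i < sizeof(iv); i++)
		printf("%02x", iv[i]);
	printf("\n");	/* prints 00000000000000000000000000000061 */
	return 0;
}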
@@ -219,6 +261,11 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
 	.generator = crypt_iv_essiv_gen
 };
 
+static struct crypt_iv_operations crypt_iv_benbi_ops = {
+	.ctr	   = crypt_iv_benbi_ctr,
+	.dtr	   = crypt_iv_benbi_dtr,
+	.generator = crypt_iv_benbi_gen
+};
 
 static int
 crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
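
Like the plain and essiv generators, benbi hooks into dm-crypt purely through
this crypt_iv_operations table: .ctr validates parameters when the mapping is
constructed, .dtr tears them down, and .generator is invoked per sector as
data is converted. Packing the precomputed shift straight into the
iv_gen_private pointer avoids a per-device allocation, which is why the
destructor only needs to reset the pointer.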
@@ -768,7 +815,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	cc->tfm = tfm;
 
 	/*
-	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
+	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
 	 * See comments at iv code
 	 */
@@ -778,6 +825,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		cc->iv_gen_ops = &crypt_iv_plain_ops;
 	else if (strcmp(ivmode, "essiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
+	else if (strcmp(ivmode, "benbi") == 0)
+		cc->iv_gen_ops = &crypt_iv_benbi_ops;
 	else {
 		ti->error = "Invalid IV mode";
 		goto bad2;
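
With the table hookup above, a userspace mapping line selecting the new
generator would look something like (hypothetical device and key values; the
key must carry LRW's extra tweak-key material in addition to the AES key):
"0 409600 crypt aes-lrw-benbi <hex key> 0 /dev/sdb 0".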