crypto: caam - add ecb(*) support

Add ecb mode support for the aes, des, 3des and arc4 ciphers.
ecb(*) reuses the existing skcipher implementation, updating it to
support requests with no IV.

Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit eaed71a44a (parent bd30cf533b)
Author:    Iuliana Prodan <iuliana.prodan@nxp.com>
Date:      2019-02-08 15:50:09 +02:00
Committer: Herbert Xu <herbert@gondor.apana.org.au>

4 changed files with 161 additions and 49 deletions
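For context: ECB transforms report crypto_skcipher_ivsize() == 0, so callers simply pass a NULL IV. Below is a minimal sketch, not part of the patch, of exercising the new mode through the generic skcipher API; the helper name test_ecb_one_block and the caller-supplied buffer are assumptions.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Sketch: encrypt one block in place with ecb(aes); the IV is NULL. */
static int test_ecb_one_block(u8 *buf, const u8 *key, unsigned int keylen)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_tfm;
        }

        sg_init_one(&sg, buf, AES_BLOCK_SIZE);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        /* ECB: no IV, hence the NULL */
        skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, NULL);

        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_tfm:
        crypto_free_skcipher(tfm);
        return err;
}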

diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig

@@ -91,6 +91,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_DES
 	help
 	  Selecting this will offload crypto for users of the
 	  scatterlist crypto API (such as the linux native IPSec

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c

@@ -3,7 +3,7 @@
  * caam - Freescale FSL CAAM support for crypto API
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
  *
  * Based on talitos crypto API driver.
  *
@@ -766,6 +766,27 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	return 0;
 }
 
+static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
+			       const u8 *key, unsigned int keylen)
+{
+	u32 tmp[DES3_EDE_EXPKEY_WORDS];
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+
+	if (keylen == DES3_EDE_KEY_SIZE &&
+	    __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) {
+		return -EINVAL;
+	}
+
+	if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) &
+	    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
+		crypto_skcipher_set_flags(skcipher,
+					  CRYPTO_TFM_RES_WEAK_KEY);
+		return -EINVAL;
+	}
+
+	return skcipher_setkey(skcipher, key, keylen);
+}
+
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 			       unsigned int keylen)
 {
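Note that the new des_skcipher_setkey() rejects weak DES keys only when the user asked for strict checking. A hedged sketch of that policy from a caller's perspective, assuming a tfm already allocated for "ecb(des)"; the all-0x01 key is on the classic DES weak-key list that des_ekey() detects:

#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <linux/kernel.h>

/* Sketch (not in the patch): weak-key rejection is opt-in. */
static void demo_weak_key_policy(struct crypto_skcipher *tfm)
{
        static const u8 weak_key[DES_KEY_SIZE] = {
                0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01
        };
        int err;

        /* Default: weak keys are tolerated, setkey succeeds */
        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
        err = crypto_skcipher_setkey(tfm, weak_key, DES_KEY_SIZE);
        WARN_ON(err);

        /* Strict mode: the same key is rejected with -EINVAL */
        crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
        err = crypto_skcipher_setkey(tfm, weak_key, DES_KEY_SIZE);
        WARN_ON(err != -EINVAL);
}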
@@ -970,8 +991,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	 * The crypto API expects us to set the IV (req->iv) to the last
 	 * ciphertext block. This is used e.g. by the CTS mode.
 	 */
-	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
-				 ivsize, 0);
+	if (ivsize)
+		scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
+					 ivsize, ivsize, 0);
 
 	kfree(edesc);
@@ -1196,9 +1218,9 @@ static void init_skcipher_job(struct skcipher_request *req,
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	u32 *desc = edesc->hw_desc;
 	u32 *sh_desc;
-	u32 out_options = 0;
-	dma_addr_t dst_dma, ptr;
-	int len;
+	u32 in_options = 0, out_options = 0;
+	dma_addr_t src_dma, dst_dma, ptr;
+	int len, sec4_sg_index = 0;
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
@@ -1216,21 +1238,27 @@ static void init_skcipher_job(struct skcipher_request *req,
 	len = desc_len(sh_desc);
 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
-	append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
-			  LDST_SGF);
+	if (ivsize || edesc->mapped_src_nents > 1) {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index = edesc->mapped_src_nents + !!ivsize;
+		in_options = LDST_SGF;
+	} else {
+		src_dma = sg_dma_address(req->src);
+	}
+
+	append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
 
 	if (likely(req->src == req->dst)) {
-		dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry);
-		out_options = LDST_SGF;
+		dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
+		out_options = in_options;
+	} else if (edesc->mapped_dst_nents == 1) {
+		dst_dma = sg_dma_address(req->dst);
 	} else {
-		if (edesc->mapped_dst_nents == 1) {
-			dst_dma = sg_dma_address(req->dst);
-		} else {
-			dst_dma = edesc->sec4_sg_dma + (edesc->mapped_src_nents
-				  + 1) * sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
+		dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
+			  sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
 	}
 
 	append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
 }
@@ -1608,7 +1636,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct skcipher_edesc *edesc;
-	dma_addr_t iv_dma;
+	dma_addr_t iv_dma = 0;
 	u8 *iv;
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
@@ -1643,7 +1671,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 			dev_err(jrdev, "unable to map source\n");
 			return ERR_PTR(-ENOMEM);
 		}
-
 		mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
 					      DMA_FROM_DEVICE);
 		if (unlikely(!mapped_dst_nents)) {
@@ -1653,7 +1680,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		}
 	}
 
-	sec4_sg_ents = 1 + mapped_src_nents;
+	if (!ivsize && mapped_src_nents == 1)
+		sec4_sg_ents = 0; // no need for an input hw s/g table
+	else
+		sec4_sg_ents = mapped_src_nents + !!ivsize;
 	dst_sg_idx = sec4_sg_ents;
 	sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
 	sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
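Worked examples of the new entry accounting may help; this sketch mirrors the computation above (not driver code):

/* Sketch: number of input S/G entries, per the logic above.
 * ecb, 1 src segment  -> 0 (direct pointer, no table at all)
 * cbc, 1 src segment  -> 2 (IV entry + 1 src entry)
 * ecb, 3 src segments -> 3 (no IV entry)
 */
static int input_sec4_sg_ents(int ivsize, int mapped_src_nents)
{
        if (!ivsize && mapped_src_nents == 1)
                return 0;
        return mapped_src_nents + !!ivsize;
}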
@@ -1679,34 +1709,41 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 			 desc_bytes);
 
 	/* Make sure IV is located in a DMAable area */
-	iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
-	memcpy(iv, req->iv, ivsize);
-
-	iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, iv_dma)) {
-		dev_err(jrdev, "unable to map IV\n");
-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0);
-		kfree(edesc);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
-	sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0);
+	if (ivsize) {
+		iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
+		memcpy(iv, req->iv, ivsize);
+
+		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, iv_dma)) {
+			dev_err(jrdev, "unable to map IV\n");
+			caam_unmap(jrdev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, 0, 0);
+			kfree(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+	}
+
+	if (dst_sg_idx)
+		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
+				   !!ivsize, 0);
 
 	if (mapped_dst_nents > 1) {
 		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
 				   edesc->sec4_sg + dst_sg_idx, 0);
 	}
 
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-		dev_err(jrdev, "unable to map S/G table\n");
-		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, 0, 0);
-		kfree(edesc);
-		return ERR_PTR(-ENOMEM);
+	if (sec4_sg_bytes) {
+		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+						    sec4_sg_bytes,
+						    DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+			dev_err(jrdev, "unable to map S/G table\n");
+			caam_unmap(jrdev, req->src, req->dst, src_nents,
+				   dst_nents, iv_dma, ivsize, 0, 0);
+			kfree(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
 	}
 
 	edesc->iv_dma = iv_dma;
@@ -1773,8 +1810,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	 * The crypto API expects us to set the IV (req->iv) to the last
 	 * ciphertext block.
 	 */
-	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
-				 ivsize, 0);
+	if (ivsize)
+		scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
+					 ivsize, ivsize, 0);
 
 	/* Create and submit job descriptor*/
 	init_skcipher_job(req, edesc, false);
@@ -1902,6 +1940,66 @@ static struct caam_skcipher_alg driver_algs[] = {
 		},
 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
 	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ecb(des)",
+				.cra_driver_name = "ecb-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = des_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ecb(aes)",
+				.cra_driver_name = "ecb-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ecb(des3_ede)",
+				.cra_driver_name = "ecb-des3-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = des_skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
+	},
+	{
+		.skcipher = {
+			.base = {
+				.cra_name = "ecb(arc4)",
+				.cra_driver_name = "ecb-arc4-caam",
+				.cra_blocksize = ARC4_BLOCK_SIZE,
+			},
+			.setkey = skcipher_setkey,
+			.encrypt = skcipher_encrypt,
+			.decrypt = skcipher_decrypt,
+			.min_keysize = ARC4_MIN_KEY_SIZE,
+			.max_keysize = ARC4_MAX_KEY_SIZE,
+		},
+		.caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB,
+	},
 };
 
 static struct caam_aead_alg driver_aeads[] = {
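Once registered, these transforms are reachable by cra_name ("ecb(aes)", "ecb(des)", ...) and show up in /proc/crypto under their cra_driver_name. A hedged availability probe (sketch; have_ecb_arc4 is a made-up helper):

#include <linux/crypto.h>

/* Sketch: probe whether any ecb(arc4) skcipher is registered; on CAAM
 * hardware with the AFHA CHA instantiated, "ecb-arc4-caam" is one
 * candidate implementation.
 */
static bool have_ecb_arc4(void)
{
        return crypto_has_alg("ecb(arc4)", CRYPTO_ALG_TYPE_SKCIPHER,
                              CRYPTO_ALG_TYPE_MASK);
}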
@@ -3361,6 +3459,7 @@ static int __init caam_algapi_init(void)
 	struct caam_drv_private *priv;
 	int i = 0, err = 0;
 	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+	u32 arc4_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;
@@ -3405,6 +3504,8 @@ static int __init caam_algapi_init(void)
 			   CHA_ID_LS_DES_SHIFT;
 		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
 		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+		arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >>
+			    CHA_ID_LS_ARC4_SHIFT;
 		ccha_inst = 0;
 		ptha_inst = 0;
 	} else {
@@ -3421,6 +3522,7 @@ static int __init caam_algapi_init(void)
 		md_inst = mdha & CHA_VER_NUM_MASK;
 		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
 		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+		arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK;
 	}
 
 	/* If MD is present, limit digest size based on LP256 */
@@ -3441,6 +3543,10 @@ static int __init caam_algapi_init(void)
 		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
 			continue;
 
+		/* Skip ARC4 algorithms if not supported by device */
+		if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4)
+			continue;
+
 		/*
 		 * Check support for AES modes not available
 		 * on LP devices.
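For readability, the per-algorithm gating this hunk extends can be summarized as below (sketch; the DES/3DES case is assumed from the surrounding driver code, as only the AES and ARC4 checks are visible in this hunk):

/* Sketch: skip registration when the required CHA is absent. */
static bool caam_cha_present(u32 alg_sel, u32 des_inst, u32 aes_inst,
                             u32 arc4_inst)
{
        if ((alg_sel == OP_ALG_ALGSEL_DES ||
             alg_sel == OP_ALG_ALGSEL_3DES) && !des_inst)
                return false;           /* assumed from existing code */
        if (alg_sel == OP_ALG_ALGSEL_AES && !aes_inst)
                return false;
        if (alg_sel == OP_ALG_ALGSEL_ARC4 && !arc4_inst)
                return false;           /* new in this patch */
        return true;
}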

diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c

@@ -2,7 +2,7 @@
 /*
  * Shared descriptors for aead, skcipher algorithms
  *
- * Copyright 2016-2018 NXP
+ * Copyright 2016-2019 NXP
  */
 
 #include "compat.h"
@@ -1396,9 +1396,11 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
 	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* Load iv */
-	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
-			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+	/* Load IV, if there is one */
+	if (ivsize)
+		append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+				LDST_CLASS_1_CCB | (ctx1_iv_off <<
+				LDST_OFFSET_SHIFT));
 
 	/* Load counter into CONTEXT1 reg */
 	if (is_rfc3686)
@@ -1462,9 +1464,11 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
 	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* load IV */
-	append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
-			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+	/* Load IV, if there is one */
+	if (ivsize)
+		append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+				LDST_CLASS_1_CCB | (ctx1_iv_off <<
+				LDST_OFFSET_SHIFT));
 
 	/* Load counter into CONTEXT1 reg */
 	if (is_rfc3686)

diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h

@@ -43,6 +43,7 @@
 #include <crypto/akcipher.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/skcipher.h>
+#include <crypto/arc4.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/rsa.h>