This push fixes a regression in the caam driver.

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEn51F/lCuNhUwmDeSxycdCkmxi6cFAmP9vBEACgkQxycdCkmx
 i6dZTRAAwLbPc5PDEHGU16VldY8tJnn4Zb+LTjmeWMBhqcROLhl9ofIl97g/88DH
 n3tHZ8Rxcj2+y9mSp7xu4qXZGE9ECo9ka3SWBGBRaNI05bNDWuWBXh9tBcnf9yst
 TETw9Bmv7fsKbMo6qi8ZICJobGqCd5wFGUYqYJNcET+jLY84PX2PxcyJTul7tMy6
 tzWjgiankQRGknpF3nWJ3v9kXOu+aOgU6eEJ56ty27nTgsAlD4frkTUmlx3+Dckl
 V0D6t8Ctgy4JOvqWr+2Ng2VuO2r9k0OkqXR7/jdT4pV990bmdbocKWWDEuTktjTq
 qtJOKZ2v9QyNFYX9yMGHYocP7CSG8vYyjf24y16jjUZYATWs7XVdENIldjSnr4hG
 wmIpZ3Nsc1R8usWzA+B4Puknc/GXgawryDPO+v5WBtRDWC6xBQdRo6vZqRS3FWB5
 sgWFh1lSCU9jEL3NXUK/GF8AgsUCBX+1L1JViTju760ABXq5+PvdSr9o/8DrbMUJ
 HXQo3Ia54vt2RuwkAB/lOZx2Phsg38kXS2pjoFh4l1O0qvifc+jZfvsF2gSUQpud
 +ugoSSL6U8C9uCpgColodECOiCToGjJ58i3kOFA5W2RbD5Wn7Tk/xU/n4DFFnE4L
 6AZqaxYxYkDvdhFgIASID1GRtZmedCEgaTB8/VKY6qRPJvjbd6c=
 =a8T+
 -----END PGP SIGNATURE-----

Merge tag 'v6.3-p2' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fix from Herbert Xu:
 "Fix a regression in the caam driver"

* tag 'v6.3-p2' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: caam - Fix edesc/iv ordering mixup
commit f915322fe0
Author: Linus Torvalds
Date:   2023-03-05 11:32:30 -08:00

3 changed files with 53 additions and 23 deletions
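For context on the layout being restored here, below is a minimal, self-contained userspace sketch of the idea (not the driver code): the IV lives in the same allocation as the extended descriptor, placed at the first cache-line-aligned offset past the scatter/gather table, so that DMA-mapping the IV never shares a cache line with CPU-owned fields. CACHE_ALIGN, struct fake_edesc, iv_of() and the sizes used are made-up stand-ins; in the driver the alignment comes from dma_get_cache_alignment() and the lookup is skcipher_edesc_iv().

/*
 * Minimal userspace sketch of the layout this fix restores (assumptions:
 * CACHE_ALIGN stands in for dma_get_cache_alignment(); struct fake_edesc
 * and iv_of() are made-up names, not the driver's). One allocation holds
 * the descriptor state and the S/G table, and the IV is placed at the
 * next cache-line boundary after them, so mapping the IV for DMA never
 * touches a cache line that the CPU still owns.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_ALIGN 64			/* assumed DMA cache-line size */

struct fake_edesc {
	size_t sg_bytes;		/* bytes used by the S/G table */
	unsigned char sg[];		/* S/G table; IV follows after padding */
};

/* First CACHE_ALIGN boundary past the S/G table (what PTR_ALIGN computes). */
static unsigned char *iv_of(struct fake_edesc *e)
{
	uintptr_t p = (uintptr_t)e->sg + e->sg_bytes;

	return (unsigned char *)((p + CACHE_ALIGN - 1) & ~(uintptr_t)(CACHE_ALIGN - 1));
}

int main(void)
{
	size_t sg_bytes = 48, ivsize = 16;
	size_t len = sizeof(struct fake_edesc) + sg_bytes;

	/* pad so the IV starts on its own cache line, then append it */
	len = (len + CACHE_ALIGN - 1) & ~(size_t)(CACHE_ALIGN - 1);
	len += ivsize;
	/* round up again so aligned_alloc() gets a multiple of CACHE_ALIGN */
	len = (len + CACHE_ALIGN - 1) & ~(size_t)(CACHE_ALIGN - 1);

	struct fake_edesc *e = aligned_alloc(CACHE_ALIGN, len);
	if (!e)
		return 1;
	memset(e, 0, len);
	e->sg_bytes = sg_bytes;

	printf("allocation %zu bytes, IV at offset %zu\n", len,
	       (size_t)(iv_of(e) - (unsigned char *)e));
	free(e);
	return 0;
}

Only the offset arithmetic is the point of the sketch; in the driver, kzalloc() and the kmem_cache created with dma_get_cache_alignment() (see the qi.c hunk below) are what guarantee that the allocation base itself is suitably aligned.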

drivers/crypto/caam/caamalg.c

@@ -60,7 +60,11 @@
 #include <crypto/xts.h>
 #include <asm/unaligned.h>
 #include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 
 /*
  * crypto alg
@@ -1000,6 +1004,13 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
 		crypto_finalize_aead_request(jrp->engine, req, ecode);
 }
 
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
+{
+	return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
+			 dma_get_cache_alignment());
+}
+
 static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
 				void *context)
 {
@@ -1027,8 +1038,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
 	 * This is used e.g. by the CTS mode.
 	 */
 	if (ivsize && !ecode) {
-		memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
-		       ivsize);
+		memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
 
 		print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
 				     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
@@ -1683,18 +1693,19 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	/*
 	 * allocate space for base edesc and hw desc commands, link tables, IV
 	 */
-	aligned_size = ALIGN(ivsize, __alignof__(*edesc));
-	aligned_size += sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
+	aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
 	aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
-	iv = kzalloc(aligned_size, flags);
-	if (!iv) {
+	aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) &
+			(dma_get_cache_alignment() - 1);
+	aligned_size += ALIGN(ivsize, dma_get_cache_alignment());
+	edesc = kzalloc(aligned_size, flags);
+	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
 			   0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->mapped_src_nents = mapped_src_nents;
@@ -1706,6 +1717,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 
 	/* Make sure IV is located in a DMAable area */
 	if (ivsize) {
+		iv = skcipher_edesc_iv(edesc);
 		memcpy(iv, req->iv, ivsize);
 
 		iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);

drivers/crypto/caam/caamalg_qi.c

@@ -20,8 +20,11 @@
 #include "caamalg_desc.h"
 #include <crypto/xts.h>
 #include <asm/unaligned.h>
+#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/dma-mapping.h>
 #include <linux/kernel.h>
+#include <linux/string.h>
 
 /*
  * crypto alg
@@ -1204,6 +1207,12 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
 					   false);
 }
 
+static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
+{
+	return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
+			 dma_get_cache_alignment());
+}
+
 static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
 {
 	struct skcipher_edesc *edesc;
@@ -1236,8 +1245,7 @@ static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
 	 * This is used e.g. by the CTS mode.
 	 */
 	if (!ecode)
-		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
-		       ivsize);
+		memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);
 
 	qi_cache_free(edesc);
 	skcipher_request_complete(req, ecode);
@@ -1259,6 +1267,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
 	struct qm_sg_entry *sg_table, *fd_sgt;
 	struct caam_drv_ctx *drv_ctx;
+	unsigned int len;
 
 	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
 	if (IS_ERR(drv_ctx))
@@ -1319,9 +1328,12 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
 
 	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
-	if (unlikely(ALIGN(ivsize, __alignof__(*edesc)) +
-		     offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes >
-		     CAAM_QI_MEMCACHE_SIZE)) {
+
+	len = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes;
+	len = ALIGN(len, dma_get_cache_alignment());
+	len += ivsize;
+
+	if (unlikely(len > CAAM_QI_MEMCACHE_SIZE)) {
 		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
 			qm_sg_ents, ivsize);
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1330,18 +1342,24 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	}
 
 	/* allocate space for base edesc, link tables and IV */
-	iv = qi_cache_alloc(flags);
-	if (unlikely(!iv)) {
+	edesc = qi_cache_alloc(flags);
+	if (unlikely(!edesc)) {
 		dev_err(qidev, "could not allocate extended descriptor\n");
 		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
 			   0, DMA_NONE, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));
+	edesc->src_nents = src_nents;
+	edesc->dst_nents = dst_nents;
+	edesc->qm_sg_bytes = qm_sg_bytes;
+	edesc->drv_req.app_ctx = req;
+	edesc->drv_req.cbk = skcipher_done;
+	edesc->drv_req.drv_ctx = drv_ctx;
+
 	/* Make sure IV is located in a DMAable area */
 	sg_table = &edesc->sgt[0];
+	iv = skcipher_edesc_iv(edesc);
 	memcpy(iv, req->iv, ivsize);
 
 	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
@@ -1353,13 +1371,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	edesc->src_nents = src_nents;
-	edesc->dst_nents = dst_nents;
 	edesc->iv_dma = iv_dma;
-	edesc->qm_sg_bytes = qm_sg_bytes;
-	edesc->drv_req.app_ctx = req;
-	edesc->drv_req.cbk = skcipher_done;
-	edesc->drv_req.drv_ctx = drv_ctx;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
 	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

drivers/crypto/caam/qi.c

@@ -8,7 +8,13 @@
  */
 
 #include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
 #include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 #include <soc/fsl/qman.h>
 
 #include "debugfs.h"
@@ -755,8 +761,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
 		napi_enable(irqtask);
 	}
 
-	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
-				     0, NULL);
+	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE,
+				     dma_get_cache_alignment(), 0, NULL);
 	if (!qi_cache) {
 		dev_err(qidev, "Can't allocate CAAM cache\n");
 		free_rsp_fqs();