Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (last synced 2024-10-29 23:53:32 +00:00).
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu: "A number of fixes to the omap and nitrox drivers."

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  - crypto: cavium/nitrox - Fix 'nitrox_get_first_device()' when ndevlist is fully iterated
  - crypto: omap-sham - add proper load balancing support for multicore
  - crypto: omap-aes - prevent unregistering algorithms twice
  - crypto: omap-sham - fix very small data size handling
  - crypto: omap-sham - huge buffer access fixes
  - crypto: omap-crypto - fix userspace copied buffer access
  - crypto: omap-sham - force kernel driver usage for sha algos
  - crypto: omap-aes - avoid spamming console with self tests
This commit is contained in: commit e8de4575cf — 5 changed files with 74 additions and 50 deletions.
|
@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
|
|||
|
||||
struct nitrox_device *nitrox_get_first_device(void)
|
||||
{
|
||||
struct nitrox_device *ndev = NULL;
|
||||
struct nitrox_device *ndev;
|
||||
|
||||
mutex_lock(&devlist_lock);
|
||||
list_for_each_entry(ndev, &ndevlist, list) {
|
||||
|
@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void)
|
|||
break;
|
||||
}
|
||||
mutex_unlock(&devlist_lock);
|
||||
if (!ndev)
|
||||
if (&ndev->list == &ndevlist)
|
||||
return NULL;
|
||||
|
||||
refcount_inc(&ndev->refcnt);
|
||||
|
|
|
@ -77,7 +77,6 @@ static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
|
|||
tag = (u8 *)rctx->auth_tag;
|
||||
for (i = 0; i < dd->authsize; i++) {
|
||||
if (tag[i]) {
|
||||
dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
|
||||
ret = -EBADMSG;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1269,13 +1269,17 @@ static int omap_aes_remove(struct platform_device *pdev)
|
|||
spin_unlock(&list_lock);
|
||||
|
||||
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
|
||||
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
|
||||
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
|
||||
crypto_unregister_skcipher(
|
||||
&dd->pdata->algs_info[i].algs_list[j]);
|
||||
dd->pdata->algs_info[i].registered--;
|
||||
}
|
||||
|
||||
for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
|
||||
for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
|
||||
aalg = &dd->pdata->aead_algs_info->algs_list[i];
|
||||
crypto_unregister_aead(aalg);
|
||||
dd->pdata->aead_algs_info->registered--;
|
||||
|
||||
}
|
||||
|
||||
crypto_engine_exit(dd->engine);
|
||||
|
|
|
@ -178,11 +178,17 @@ static void omap_crypto_copy_data(struct scatterlist *src,
|
|||
amt = min(src->length - srco, dst->length - dsto);
|
||||
amt = min(len, amt);
|
||||
|
||||
srcb = sg_virt(src) + srco;
|
||||
dstb = sg_virt(dst) + dsto;
|
||||
srcb = kmap_atomic(sg_page(src)) + srco + src->offset;
|
||||
dstb = kmap_atomic(sg_page(dst)) + dsto + dst->offset;
|
||||
|
||||
memcpy(dstb, srcb, amt);
|
||||
|
||||
if (!PageSlab(sg_page(dst)))
|
||||
flush_kernel_dcache_page(sg_page(dst));
|
||||
|
||||
kunmap_atomic(srcb);
|
||||
kunmap_atomic(dstb);
|
||||
|
||||
srco += amt;
|
||||
dsto += amt;
|
||||
len -= amt;
|
||||
|
|
|
@ -168,8 +168,6 @@ struct omap_sham_hmac_ctx {
|
|||
};
|
||||
|
||||
struct omap_sham_ctx {
|
||||
struct omap_sham_dev *dd;
|
||||
|
||||
unsigned long flags;
|
||||
|
||||
/* fallback stuff */
|
||||
|
@ -750,8 +748,17 @@ static int omap_sham_align_sgs(struct scatterlist *sg,
|
|||
int offset = rctx->offset;
|
||||
int bufcnt = rctx->bufcnt;
|
||||
|
||||
if (!sg || !sg->length || !nbytes)
|
||||
if (!sg || !sg->length || !nbytes) {
|
||||
if (bufcnt) {
|
||||
bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
|
||||
sg_init_table(rctx->sgl, 1);
|
||||
sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
|
||||
rctx->sg = rctx->sgl;
|
||||
rctx->sg_len = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
new_len = nbytes;
|
||||
|
||||
|
@ -895,7 +902,7 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update)
|
|||
if (hash_later < 0)
|
||||
hash_later = 0;
|
||||
|
||||
if (hash_later) {
|
||||
if (hash_later && hash_later <= rctx->buflen) {
|
||||
scatterwalk_map_and_copy(rctx->buffer,
|
||||
req->src,
|
||||
req->nbytes - hash_later,
|
||||
|
@ -925,27 +932,35 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
|
|||
return 0;
|
||||
}
|
||||
|
||||
struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
|
||||
{
|
||||
struct omap_sham_dev *dd;
|
||||
|
||||
if (ctx->dd)
|
||||
return ctx->dd;
|
||||
|
||||
spin_lock_bh(&sham.lock);
|
||||
dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
|
||||
list_move_tail(&dd->list, &sham.dev_list);
|
||||
ctx->dd = dd;
|
||||
spin_unlock_bh(&sham.lock);
|
||||
|
||||
return dd;
|
||||
}
|
||||
|
||||
static int omap_sham_init(struct ahash_request *req)
|
||||
{
|
||||
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
|
||||
struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
|
||||
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
|
||||
struct omap_sham_dev *dd = NULL, *tmp;
|
||||
struct omap_sham_dev *dd;
|
||||
int bs = 0;
|
||||
|
||||
spin_lock_bh(&sham.lock);
|
||||
if (!tctx->dd) {
|
||||
list_for_each_entry(tmp, &sham.dev_list, list) {
|
||||
dd = tmp;
|
||||
break;
|
||||
}
|
||||
tctx->dd = dd;
|
||||
} else {
|
||||
dd = tctx->dd;
|
||||
}
|
||||
spin_unlock_bh(&sham.lock);
|
||||
ctx->dd = NULL;
|
||||
|
||||
ctx->dd = dd;
|
||||
dd = omap_sham_find_dev(ctx);
|
||||
if (!dd)
|
||||
return -ENODEV;
|
||||
|
||||
ctx->flags = 0;
|
||||
|
||||
|
@ -1215,8 +1230,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
|
|||
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
|
||||
{
|
||||
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
|
||||
struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
|
||||
struct omap_sham_dev *dd = tctx->dd;
|
||||
struct omap_sham_dev *dd = ctx->dd;
|
||||
|
||||
ctx->op = op;
|
||||
|
||||
|
@ -1226,7 +1240,7 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
|
|||
static int omap_sham_update(struct ahash_request *req)
|
||||
{
|
||||
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
|
||||
struct omap_sham_dev *dd = ctx->dd;
|
||||
struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
|
||||
|
||||
if (!req->nbytes)
|
||||
return 0;
|
||||
|
@ -1319,21 +1333,8 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
|
|||
struct omap_sham_hmac_ctx *bctx = tctx->base;
|
||||
int bs = crypto_shash_blocksize(bctx->shash);
|
||||
int ds = crypto_shash_digestsize(bctx->shash);
|
||||
struct omap_sham_dev *dd = NULL, *tmp;
|
||||
int err, i;
|
||||
|
||||
spin_lock_bh(&sham.lock);
|
||||
if (!tctx->dd) {
|
||||
list_for_each_entry(tmp, &sham.dev_list, list) {
|
||||
dd = tmp;
|
||||
break;
|
||||
}
|
||||
tctx->dd = dd;
|
||||
} else {
|
||||
dd = tctx->dd;
|
||||
}
|
||||
spin_unlock_bh(&sham.lock);
|
||||
|
||||
err = crypto_shash_setkey(tctx->fallback, key, keylen);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -1350,7 +1351,7 @@ static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
|
|||
|
||||
memset(bctx->ipad + keylen, 0, bs - keylen);
|
||||
|
||||
if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
|
||||
if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
|
||||
memcpy(bctx->opad, bctx->ipad, bs);
|
||||
|
||||
for (i = 0; i < bs; i++) {
|
||||
|
@ -1571,7 +1572,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
|
|||
.cra_name = "sha224",
|
||||
.cra_driver_name = "omap-sha224",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = SHA224_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct omap_sham_ctx),
|
||||
|
@ -1592,7 +1594,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
|
|||
.cra_name = "sha256",
|
||||
.cra_driver_name = "omap-sha256",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = SHA256_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct omap_sham_ctx),
|
||||
|
@ -1614,7 +1617,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
|
|||
.cra_name = "hmac(sha224)",
|
||||
.cra_driver_name = "omap-hmac-sha224",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = SHA224_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
|
||||
|
@ -1637,7 +1641,8 @@ static struct ahash_alg algs_sha224_sha256[] = {
|
|||
.cra_name = "hmac(sha256)",
|
||||
.cra_driver_name = "omap-hmac-sha256",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = SHA256_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
|
||||
|
@ -1662,7 +1667,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
|
|||
.cra_name = "sha384",
|
||||
.cra_driver_name = "omap-sha384",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = SHA384_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct omap_sham_ctx),
|
||||
|
@ -1683,7 +1689,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
|
|||
.cra_name = "sha512",
|
||||
.cra_driver_name = "omap-sha512",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = SHA512_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct omap_sham_ctx),
|
||||
|
@ -1705,7 +1712,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
|
|||
.cra_name = "hmac(sha384)",
|
||||
.cra_driver_name = "omap-hmac-sha384",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = SHA384_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
|
||||
|
@ -1728,7 +1736,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
|
|||
.cra_name = "hmac(sha512)",
|
||||
.cra_driver_name = "omap-hmac-sha512",
|
||||
.cra_priority = 400,
|
||||
.cra_flags = CRYPTO_ALG_ASYNC |
|
||||
.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
|
||||
CRYPTO_ALG_ASYNC |
|
||||
CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = SHA512_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
|
||||
|
@ -2154,6 +2163,7 @@ static int omap_sham_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
dd->flags |= dd->pdata->flags;
|
||||
sham.flags |= dd->pdata->flags;
|
||||
|
||||
pm_runtime_use_autosuspend(dev);
|
||||
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
|
||||
|
@ -2181,6 +2191,9 @@ static int omap_sham_probe(struct platform_device *pdev)
|
|||
spin_unlock(&sham.lock);
|
||||
|
||||
for (i = 0; i < dd->pdata->algs_info_size; i++) {
|
||||
if (dd->pdata->algs_info[i].registered)
|
||||
break;
|
||||
|
||||
for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
|
||||
struct ahash_alg *alg;
|
||||
|
||||
|
@ -2232,9 +2245,11 @@ static int omap_sham_remove(struct platform_device *pdev)
|
|||
list_del(&dd->list);
|
||||
spin_unlock(&sham.lock);
|
||||
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
|
||||
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
|
||||
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
|
||||
crypto_unregister_ahash(
|
||||
&dd->pdata->algs_info[i].algs_list[j]);
|
||||
dd->pdata->algs_info[i].registered--;
|
||||
}
|
||||
tasklet_kill(&dd->done_task);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
|
|
Loading…
Reference in a new issue