crypto: cryptd - Protect per-CPU resource by disabling BH.

[ Upstream commit 91e8bcd7b4 ]

The access to cryptd_queue::cpu_queue is synchronized by disabling
preemption in cryptd_enqueue_request() and disabling BH in
cryptd_queue_worker(). This implies that access is allowed from BH.

If cryptd_enqueue_request() is invoked from preemptible context _and_
soft interrupt then this can lead to list corruption since
cryptd_enqueue_request() is not protected against access from
soft interrupt.

Replace get_cpu() in cryptd_enqueue_request() with local_bh_disable()
to ensure BH is always disabled.
Remove preempt_disable() from cryptd_queue_worker() since it is not
needed because local_bh_disable() ensures synchronisation.

Fixes: 254eff771441 ("crypto: cryptd - Per-CPU thread implementation based on tasklets")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Sebastian Andrzej Siewior 2022-05-04 17:07:36 +02:00 committed by Greg Kroah-Hartman
parent 5c7ea7e944
commit 882fd789b8
1 changed file with 11 additions and 12 deletions

View File

@@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
 };
 
 struct cryptd_queue {
+	/*
+	 * Protected by disabling BH to allow enqueueing from softinterrupt and
+	 * dequeuing from kworker (cryptd_queue_worker()).
+	 */
 	struct cryptd_cpu_queue __percpu *cpu_queue;
 };
@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue)
 static int cryptd_enqueue_request(struct cryptd_queue *queue,
 				  struct crypto_async_request *request)
 {
-	int cpu, err;
+	int err;
 	struct cryptd_cpu_queue *cpu_queue;
 	refcount_t *refcnt;
 
-	cpu = get_cpu();
+	local_bh_disable();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
 
 	if (err == -ENOSPC)
-		goto out_put_cpu;
+		goto out;
 
-	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
+	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
 
 	if (!refcount_read(refcnt))
-		goto out_put_cpu;
+		goto out;
 
 	refcount_inc(refcnt);
 
-out_put_cpu:
-	put_cpu();
+out:
+	local_bh_enable();
 
 	return err;
 }
@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 	/*
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
-	 * preempt_disable/enable is used to prevent being preempted by
-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-	 * cryptd_enqueue_request() being accessed from software interrupts.
 	 */
 	local_bh_disable();
-	preempt_disable();
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
 	local_bh_enable();
 
 	if (!req)