diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f736c8895089..efa8b620fee8 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -482,12 +482,12 @@ static void acm_read_bulk_callback(struct urb *urb)
 	struct acm *acm = rb->instance;
 	unsigned long flags;
 	int status = urb->status;
+	bool stopped = false;
+	bool stalled = false;
 
 	dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
 		rb->index, urb->actual_length, status);
 
-	set_bit(rb->index, &acm->read_urbs_free);
-
 	if (!acm->dev) {
 		dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
 		return;
@@ -500,15 +500,16 @@ static void acm_read_bulk_callback(struct urb *urb)
 		break;
 	case -EPIPE:
 		set_bit(EVENT_RX_STALL, &acm->flags);
-		schedule_work(&acm->work);
-		return;
+		stalled = true;
+		break;
 	case -ENOENT:
 	case -ECONNRESET:
 	case -ESHUTDOWN:
 		dev_dbg(&acm->data->dev,
 			"%s - urb shutting down with status: %d\n",
 			__func__, status);
-		return;
+		stopped = true;
+		break;
 	default:
 		dev_dbg(&acm->data->dev,
 			"%s - nonzero urb status received: %d\n",
@@ -517,10 +518,24 @@ static void acm_read_bulk_callback(struct urb *urb)
 	}
 
 	/*
-	 * Unthrottle may run on another CPU which needs to see events
-	 * in the same order. Submission has an implict barrier
+	 * Make sure URB processing is done before marking as free to avoid
+	 * racing with unthrottle() on another CPU. Matches the barriers
+	 * implied by the test_and_clear_bit() in acm_submit_read_urb().
 	 */
 	smp_mb__before_atomic();
+	set_bit(rb->index, &acm->read_urbs_free);
+	/*
+	 * Make sure URB is marked as free before checking the throttled flag
+	 * to avoid racing with unthrottle() on another CPU. Matches the
+	 * smp_mb() in unthrottle().
+	 */
+	smp_mb__after_atomic();
+
+	if (stopped || stalled) {
+		if (stalled)
+			schedule_work(&acm->work);
+		return;
+	}
 
 	/* throttle device if requested by tty */
 	spin_lock_irqsave(&acm->read_lock, flags);
@@ -854,6 +869,9 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
 	acm->throttle_req = 0;
 	spin_unlock_irq(&acm->read_lock);
 
+	/* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
+	smp_mb();
+
 	if (was_throttled)
 		acm_submit_read_urbs(acm, GFP_KERNEL);
 }
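
For review context, here is a minimal userspace sketch of the store-buffering
race the paired barriers close. It uses C11 atomics in place of the kernel's
set_bit()/smp_mb() helpers, and the names urb_free and throttled are
placeholders, not the driver's actual fields. Each side publishes its own
store, issues a full fence, and only then reads the other side's flag, so at
least one of the two paths is guaranteed to observe the free URB and resubmit
it; without the fences, both CPUs could read stale values and the URB would
never be resubmitted.

/*
 * Illustrative sketch only, not driver code: models the completion callback
 * and unthrottle ordering with C11 atomics.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool urb_free;   /* stands in for the read_urbs_free bit */
static atomic_bool throttled;  /* stands in for the tty throttle state */

/* Completion side: mark the URB free, then check the throttle flag. */
static void read_callback(void)
{
	atomic_store_explicit(&urb_free, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* smp_mb__after_atomic() */
	if (!atomic_load_explicit(&throttled, memory_order_relaxed))
		printf("callback resubmits the URB\n");
}

/* Unthrottle side: drop the throttle flag, then pick up any free URBs. */
static void unthrottle(void)
{
	atomic_store_explicit(&throttled, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* smp_mb() */
	if (atomic_load_explicit(&urb_free, memory_order_relaxed))
		printf("unthrottle resubmits the URB\n");
}

int main(void)
{
	atomic_store(&throttled, true);  /* tty throttled the device       */
	read_callback();                 /* URB completes while throttled  */
	unthrottle();                    /* at least one path resubmits    */
	return 0;
}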