author		Johan Hovold <johan@kernel.org>				2019-04-25 18:05:39 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>		2019-04-29 16:14:41 +0200
commit		764478f41130f1b8d8057575b89e69980a0f600d
tree		a87e988c5598697b4baf92fed4d6b99b183ea771
parent		c2d1812600f5a8c9341b2db7c863cd2725444de5
USB: cdc-acm: fix unthrottle races
Fix two long-standing bugs which could potentially lead to memory
corruption or leave the port throttled until it is reopened (on weakly
ordered systems), respectively, when read-URB completion races with
unthrottle().
First, the URB must not be marked as free before processing is complete
to prevent it from being submitted by unthrottle() on another CPU.
	CPU 1				CPU 2
	================		================
	complete()			unthrottle()
	  process_urb();
	  smp_mb__before_atomic();
	  set_bit(i, free);		  if (test_and_clear_bit(i, free))
	  				  	submit_urb();
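
As an illustration only (this is not the driver code), the same ordering
requirement can be modelled in user space with C11 atomics: the completion
side must publish its processing results before setting the free bit, and a
consumer that successfully test-and-clears the bit is then guaranteed to see
those results. All identifiers below (payload, free_bits, complete_slot(),
claim_slot()) are invented for this sketch.

	/*
	 * Illustrative user-space analogue of the first ordering
	 * requirement, using C11 atomics. Invented names only.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>

	#define NR_SLOTS 16

	static int payload[NR_SLOTS];	/* stands in for processed URB data  */
	static atomic_ulong free_bits;	/* stands in for acm->read_urbs_free */

	/*
	 * Completion side: finish all processing, then mark the slot free.
	 * The release ordering on the fetch_or plays the role of
	 * smp_mb__before_atomic() followed by set_bit() in the driver.
	 */
	static void complete_slot(int i)
	{
		payload[i] = 42;				/* "process_urb()" */
		atomic_fetch_or_explicit(&free_bits, 1UL << i,
					 memory_order_release);	/* "set_bit(i, free)" */
	}

	/*
	 * Unthrottle side: only touch slots whose free bit was successfully
	 * cleared. The acquire ordering pairs with the release above,
	 * mirroring the barriers implied by a successful
	 * test_and_clear_bit(), so payload[i] is guaranteed to be visible.
	 */
	static bool claim_slot(int i)
	{
		unsigned long prev = atomic_fetch_and_explicit(&free_bits, ~(1UL << i),
							       memory_order_acquire);

		return prev & (1UL << i);	/* true: safe to "submit_urb()" */
	}

With this pairing a slot can never be claimed, and its buffer reused, while
the completion side is still writing to it, which is exactly what marking the
URB free too early allowed.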
Second, the URB must be marked as free before checking the throttled
flag to prevent unthrottle() on another CPU from failing to observe that
the URB needs to be submitted if complete() sees that the throttled flag
is set.
	CPU 1				CPU 2
	================		================
	complete()			unthrottle()
	  set_bit(i, free);		  throttled = 0;
	  smp_mb__after_atomic();	  smp_mb();
	  if (throttled)		  if (test_and_clear_bit(i, free))
	  	return;			  	submit_urb();
Note that test_and_clear_bit() only implies barriers when the test is
successful. To handle the case where the URB is still in use, an explicit
barrier needs to be added to unthrottle() for the second race condition.
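
Purely as a sketch, again with C11 atomics in user space and invented
identifiers (slot_free, complete_side(), unthrottle_side()), the second race
is the classic store-buffering pattern: each side stores to one flag and then
reads the other, so each needs a full barrier in between to guarantee that at
least one side observes the other's store.

	/*
	 * Illustrative user-space analogue of the second ordering
	 * requirement (store-buffering pattern). Invented names only.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool slot_free;	/* stands in for the per-URB free bit */
	static atomic_bool throttled;	/* stands in for acm->throttled       */

	/*
	 * Completion side: mark the slot free, full fence, then check the
	 * throttled flag. The fence corresponds to smp_mb__after_atomic()
	 * in the callback. Returns true if the completion handler should
	 * resubmit the URB itself (port not throttled).
	 */
	static bool complete_side(void)
	{
		atomic_store_explicit(&slot_free, true, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		return !atomic_load_explicit(&throttled, memory_order_relaxed);
	}

	/*
	 * Unthrottle side: clear throttled, full fence, then try to claim
	 * the free slot. The fence corresponds to the new smp_mb() in
	 * unthrottle(). Returns true if unthrottle should resubmit the URB.
	 */
	static bool unthrottle_side(void)
	{
		atomic_store_explicit(&throttled, false, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		return atomic_exchange_explicit(&slot_free, false, memory_order_relaxed);
	}

The full fences on both sides rule out the outcome in which complete_side()
still sees the port as throttled while unthrottle_side() finds no free slot,
i.e. the case where the URB would never be resubmitted; an unsuccessful
test_and_clear_bit() alone gives no such guarantee.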
Also note that the first race was fixed by 36e59e0d70d6 ("cdc-acm: fix
race between callback and unthrottle") back in 2015, but the bug was
reintroduced a year later.
Fixes: 1aba579f3cf5 ("cdc-acm: handle read pipe errors")
Fixes: 088c64f81284 ("USB: cdc-acm: re-write read processing")
Signed-off-by: Johan Hovold <johan@kernel.org>
Acked-by: Oliver Neukum <oneukum@suse.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	drivers/usb/class/cdc-acm.c	32
1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ec666eb4b7b4..c03aa8550980 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -470,12 +470,12 @@ static void acm_read_bulk_callback(struct urb *urb)
 	struct acm *acm = rb->instance;
 	unsigned long flags;
 	int status = urb->status;
+	bool stopped = false;
+	bool stalled = false;
 
 	dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
 		rb->index, urb->actual_length, status);
 
-	set_bit(rb->index, &acm->read_urbs_free);
-
 	if (!acm->dev) {
 		dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
 		return;
@@ -488,15 +488,16 @@ static void acm_read_bulk_callback(struct urb *urb)
 		break;
 	case -EPIPE:
 		set_bit(EVENT_RX_STALL, &acm->flags);
-		schedule_work(&acm->work);
-		return;
+		stalled = true;
+		break;
 	case -ENOENT:
 	case -ECONNRESET:
 	case -ESHUTDOWN:
 		dev_dbg(&acm->data->dev,
 			"%s - urb shutting down with status: %d\n",
 			__func__, status);
-		return;
+		stopped = true;
+		break;
 	default:
 		dev_dbg(&acm->data->dev,
 			"%s - nonzero urb status received: %d\n",
@@ -505,10 +506,24 @@ static void acm_read_bulk_callback(struct urb *urb)
 	}
 
 	/*
-	 * Unthrottle may run on another CPU which needs to see events
-	 * in the same order. Submission has an implict barrier
+	 * Make sure URB processing is done before marking as free to avoid
+	 * racing with unthrottle() on another CPU. Matches the barriers
+	 * implied by the test_and_clear_bit() in acm_submit_read_urb().
 	 */
 	smp_mb__before_atomic();
+	set_bit(rb->index, &acm->read_urbs_free);
+	/*
+	 * Make sure URB is marked as free before checking the throttled flag
+	 * to avoid racing with unthrottle() on another CPU. Matches the
+	 * smp_mb() in unthrottle().
+	 */
+	smp_mb__after_atomic();
+
+	if (stopped || stalled) {
+		if (stalled)
+			schedule_work(&acm->work);
+		return;
+	}
 
 	/* throttle device if requested by tty */
 	spin_lock_irqsave(&acm->read_lock, flags);
@@ -842,6 +857,9 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
 	acm->throttle_req = 0;
 	spin_unlock_irq(&acm->read_lock);
 
+	/* Matches the smp_mb__after_atomic() in acm_read_bulk_callback(). */
+	smp_mb();
+
 	if (was_throttled)
 		acm_submit_read_urbs(acm, GFP_KERNEL);
 }