hci_usb.h: fix hard-to-trigger race
If someone tries to _urb_unlink while _urb_queue_head is running, he'll see _urb->queue == NULL and fail to do any locking. Prevent that with strategically placed memory barriers.

Signed-off-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 84994e16f2
Commit: 026672d099
@@ -70,7 +70,8 @@ static inline void _urb_queue_head(struct _urb_queue *q, struct _urb *_urb)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&q->lock, flags);
-	list_add(&_urb->list, &q->head); _urb->queue = q;
+	/* _urb_unlink needs to know which spinlock to use, thus mb(). */
+	_urb->queue = q; mb(); list_add(&_urb->list, &q->head);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
@@ -78,19 +79,23 @@ static inline void _urb_queue_tail(struct _urb_queue *q, struct _urb *_urb)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&q->lock, flags);
-	list_add_tail(&_urb->list, &q->head); _urb->queue = q;
+	/* _urb_unlink needs to know which spinlock to use, thus mb(). */
+	_urb->queue = q; mb(); list_add_tail(&_urb->list, &q->head);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 static inline void _urb_unlink(struct _urb *_urb)
 {
-	struct _urb_queue *q = _urb->queue;
+	struct _urb_queue *q;
 	unsigned long flags;
-	if (q) {
-		spin_lock_irqsave(&q->lock, flags);
-		list_del(&_urb->list); _urb->queue = NULL;
-		spin_unlock_irqrestore(&q->lock, flags);
-	}
+
+	mb();
+	q = _urb->queue;
+	/* If q is NULL, it will die at easy-to-debug NULL pointer dereference.
+	   No need to BUG(). */
+	spin_lock_irqsave(&q->lock, flags);
+	list_del(&_urb->list); _urb->queue = NULL;
+	spin_unlock_irqrestore(&q->lock, flags);
 }
 
 struct hci_usb {
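For readers who want to see the ordering in isolation: below is a minimal user-space sketch, not the kernel code, of the pattern the patch relies on. It uses C11 atomics and a pthread mutex to stand in for mb() and the spinlock; the names (node, queue, publish, unlink_side, shared) are made up for the illustration. The idea is the same as in the patch: store the back-pointer to the queue first, issue a full barrier, and only then make the object reachable, so the unlinking side can never find the object without also seeing which lock it must take.

/*
 * Illustrative sketch only (assumed names, not kernel code): publish the
 * back-pointer before the node becomes reachable, with a full barrier in
 * between, so the unlink side always knows which lock to take.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node;

struct queue {
	pthread_mutex_t lock;
	struct node *head;		/* toy single-entry "list" */
};

struct node {
	struct queue *_Atomic owner;	/* which queue, and thus which lock */
};

static struct queue q = { PTHREAD_MUTEX_INITIALIZER, NULL };
static struct node *_Atomic shared;	/* how the unlinker finds the node */

static void publish(struct node *n)
{
	pthread_mutex_lock(&q.lock);
	/* Set owner first, then a full barrier, then make the node
	 * reachable -- mirrors "_urb->queue = q; mb(); list_add(...)". */
	atomic_store_explicit(&n->owner, &q, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* plays the role of mb() */
	q.head = n;
	atomic_store_explicit(&shared, n, memory_order_relaxed);
	pthread_mutex_unlock(&q.lock);
}

static void *unlink_side(void *arg)
{
	struct node *n;

	(void)arg;
	while ((n = atomic_load_explicit(&shared, memory_order_relaxed)) == NULL)
		;				/* wait until the node is reachable */
	atomic_thread_fence(memory_order_seq_cst);
	/* Because owner was published before the node became reachable,
	 * it cannot be observed as NULL here. */
	struct queue *owner = atomic_load_explicit(&n->owner, memory_order_relaxed);
	pthread_mutex_lock(&owner->lock);
	owner->head = NULL;			/* the "list_del" */
	atomic_store_explicit(&n->owner, NULL, memory_order_relaxed);
	pthread_mutex_unlock(&owner->lock);
	return NULL;
}

int main(void)
{
	struct node n = { NULL };
	pthread_t t;

	pthread_create(&t, NULL, unlink_side, NULL);
	publish(&n);
	pthread_join(&t, NULL);
	puts("unlinked while holding the right lock");
	return 0;
}

Build with something like cc -std=c11 -pthread; the mutex stands in for the spinlock and the seq_cst fences for mb(), so the guarantee shown is the same one the patch depends on, expressed in portable C.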