scsi: sg: Allow waiting for commands to complete on removed device

commit 3455607fd7 upstream.

When a SCSI device is removed while in active use, currently sg will
immediately return -ENODEV on any attempt to wait for active commands that
were sent before the removal.  This is problematic for commands that use
SG_FLAG_DIRECT_IO since the data buffer may still be in use by the kernel
when userspace frees or reuses it after getting ENODEV, leading to
corrupted userspace memory (in the case of READ-type commands) or corrupted
data being sent to the device (in the case of WRITE-type commands).  This
has been seen in practice when logging out of an iscsi_tcp session, where
the iSCSI driver may still be processing commands after the device has been
marked for removal.
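
As an illustration only (not part of this patch), the pattern that hits the
problem looks roughly like the following userspace sequence using the sg v3
asynchronous write()/read() interface; the helper name, CDB values and buffer
size are invented for the example:

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <scsi/sg.h>

/*
 * Example only: submit a READ(10) with SG_FLAG_DIRECT_IO and wait for it.
 * Before this fix, the read(2) below could fail with ENODEV as soon as the
 * device started being removed, even though the kernel might still be
 * doing DMA into "buf".
 */
static unsigned char buf[4096];         /* DMA target for direct I/O */

int read_one_chunk_direct(int sg_fd)
{
        unsigned char cdb[10] = { 0x28, 0, 0, 0, 0, 0, 0, 0, 8, 0 }; /* READ(10), 8 blocks */
        unsigned char sense[32];
        sg_io_hdr_t hdr;

        memset(&hdr, 0, sizeof(hdr));
        hdr.interface_id = 'S';
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.cmd_len = sizeof(cdb);
        hdr.cmdp = cdb;
        hdr.dxfer_len = sizeof(buf);
        hdr.dxferp = buf;
        hdr.mx_sb_len = sizeof(sense);
        hdr.sbp = sense;
        hdr.flags = SG_FLAG_DIRECT_IO;  /* kernel DMAs straight into buf */
        hdr.pack_id = 1;

        if (write(sg_fd, &hdr, sizeof(hdr)) < 0)        /* queue the command */
                return -errno;

        /*
         * Wait for the response.  Only after this succeeds is it safe to
         * free or reuse buf; with this patch the wait is allowed to finish
         * even while the device is being torn down.
         */
        if (read(sg_fd, &hdr, sizeof(hdr)) < 0)
                return -errno;
        return hdr.status ? -EIO : 0;
}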

Change the policy to allow userspace to wait for active sg commands even
when the device is being removed.  Return -ENODEV only when there are no
more responses to read.
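
Again purely as a sketch of the new semantics (function name invented, sg fd
opened without O_NONBLOCK assumed), a reaper loop can now treat ENODEV from
read(2) as "device gone and nothing left to collect":

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <scsi/sg.h>

/*
 * Example only: reap every outstanding response from an sg fd.  With this
 * patch, read(2) keeps returning completed commands (blocking for any still
 * in flight) and reports ENODEV only once the device is gone and the queue
 * of responses is empty.
 */
int drain_responses(int sg_fd)
{
        sg_io_hdr_t hdr;

        for (;;) {
                memset(&hdr, 0, sizeof(hdr));
                hdr.interface_id = 'S';
                hdr.dxfer_direction = SG_DXFER_NONE;
                hdr.pack_id = -1;       /* any command, if pack_id matching is enabled */

                if (read(sg_fd, &hdr, sizeof(hdr)) < 0) {
                        if (errno == ENODEV)
                                return 0;       /* device removed, queue drained */
                        if (errno == EINTR)
                                continue;       /* interrupted by a signal; retry */
                        return -errno;
                }
                /* hdr now describes one completed command (pack_id, status, ...) */
        }
}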

Link: https://lore.kernel.org/r/5ebea46f-fe83-2d0b-233d-d0dcb362dd0a@cybernetics.com
Cc: <stable@vger.kernel.org>
Acked-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 file changed, 33 insertions(+), 20 deletions(-)

--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -191,7 +191,7 @@ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
 static Sg_fd *sg_add_sfp(Sg_device * sdp);
 static void sg_remove_sfp(struct kref *);
-static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy);
 static Sg_request *sg_add_request(Sg_fd * sfp);
 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
 static Sg_device *sg_get_dev(int dev);
@@ -445,6 +445,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
         Sg_fd *sfp;
         Sg_request *srp;
         int req_pack_id = -1;
+        bool busy;
         sg_io_hdr_t *hp;
         struct sg_header *old_hdr;
         int retval;
@@ -467,20 +468,16 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
         if (retval)
                 return retval;
 
-        srp = sg_get_rq_mark(sfp, req_pack_id);
+        srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
         if (!srp) {             /* now wait on packet to arrive */
-                if (atomic_read(&sdp->detaching))
-                        return -ENODEV;
                 if (filp->f_flags & O_NONBLOCK)
                         return -EAGAIN;
                 retval = wait_event_interruptible(sfp->read_wait,
-                        (atomic_read(&sdp->detaching) ||
-                        (srp = sg_get_rq_mark(sfp, req_pack_id))));
-                if (atomic_read(&sdp->detaching))
-                        return -ENODEV;
-                if (retval)
-                        /* -ERESTARTSYS as signal hit process */
-                        return retval;
+                        ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) ||
+                         (!busy && atomic_read(&sdp->detaching))));
+                if (!srp)
+                        /* signal or detaching */
+                        return retval ? retval : -ENODEV;
         }
         if (srp->header.interface_id != '\0')
                 return sg_new_read(sfp, buf, count, srp);
@@ -941,9 +938,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
                 if (result < 0)
                         return result;
                 result = wait_event_interruptible(sfp->read_wait,
-                        (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
-                if (atomic_read(&sdp->detaching))
-                        return -ENODEV;
+                        srp_done(sfp, srp));
                 write_lock_irq(&sfp->rq_list_lock);
                 if (srp->done) {
                         srp->done = 2;
@@ -2056,19 +2051,28 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
 }
 
 static Sg_request *
-sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy)
 {
         Sg_request *resp;
         unsigned long iflags;
 
+        *busy = false;
         write_lock_irqsave(&sfp->rq_list_lock, iflags);
         list_for_each_entry(resp, &sfp->rq_list, entry) {
-                /* look for requests that are ready + not SG_IO owned */
-                if ((1 == resp->done) && (!resp->sg_io_owned) &&
+                /* look for requests that are not SG_IO owned */
+                if ((!resp->sg_io_owned) &&
                     ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
-                        resp->done = 2; /* guard against other readers */
-                        write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
-                        return resp;
+                        switch (resp->done) {
+                        case 0: /* request active */
+                                *busy = true;
+                                break;
+                        case 1: /* request done; response ready to return */
+                                resp->done = 2; /* guard against other readers */
+                                write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+                                return resp;
+                        case 2: /* response already being returned */
+                                break;
+                        }
                 }
         }
         write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
@@ -2122,6 +2126,15 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
                 res = 1;
         }
         write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+
+        /*
+         * If the device is detaching, wakeup any readers in case we just
+         * removed the last response, which would leave nothing for them to
+         * return other than -ENODEV.
+         */
+        if (unlikely(atomic_read(&sfp->parentdp->detaching)))
+                wake_up_interruptible_all(&sfp->read_wait);
+
         return res;
 }