vhost/scsi: Fix incorrect early vhost_scsi_handle_vq failures
This patch fixes vhost_scsi_handle_vq() failure cases that result in BUG_ON() being triggered when vhost_scsi_free_cmd() is called and ->tvc_se_cmd has not yet been initialized by target_submit_cmd_map_sgls().

It changes tcm_vhost_release_cmd() to use tcm_vhost_cmd->tvc_nexus to obtain the se_session pointer reference. It also avoids calling put_page() on NULL sg->page entries in the vhost_scsi_map_to_sgl() failure path.

Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Parent: 79c14141a4
Commit: de1419e420
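For context, the core of the put_page() change is the NULL check on partially populated scatterlist entries. The following is a minimal, self-contained C sketch of that pattern, not the driver code itself: struct page, put_page(), sg_entry, and release_mapped_pages() are simplified stand-ins for the kernel structures and helpers.

/* Sketch only: release the scatterlist entries that were populated before a
 * mapping failure, skipping entries whose page pointer is still NULL. */
#include <stdio.h>
#include <stddef.h>

struct page { int refcount; };            /* stand-in for the kernel's struct page */

static void put_page(struct page *page)   /* stand-in for the kernel helper */
{
	page->refcount--;
}

struct sg_entry { struct page *page; };   /* stand-in for struct scatterlist */

static void release_mapped_pages(struct sg_entry *sgl, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = sgl[i].page;

		/* Entries past the failure point were never mapped. */
		if (page)
			put_page(page);
	}
}

int main(void)
{
	struct page a = { 1 }, b = { 1 };
	/* Third slot simulates the entry where mapping failed. */
	struct sg_entry sgl[3] = { { &a }, { &b }, { NULL } };

	release_mapped_pages(sgl, 3);     /* must not crash on the NULL entry */
	printf("a.refcount=%d b.refcount=%d\n", a.refcount, b.refcount);
	return 0;
}

In the driver, the same check is what allows the failure paths in vhost_scsi_map_iov_to_sgl() and vhost_scsi_map_iov_to_prot() to run safely when only part of the scatterlist was pinned.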
@@ -462,7 +462,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
 {
 	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
 				struct tcm_vhost_cmd, tvc_se_cmd);
-	struct se_session *se_sess = se_cmd->se_sess;
+	struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
 	int i;
 
 	if (tv_cmd->tvc_sgl_count) {
@@ -864,9 +864,11 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
 	ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
 				    cmd->tvc_upages, write);
 	if (ret < 0) {
-		for (i = 0; i < cmd->tvc_sgl_count; i++)
-			put_page(sg_page(&cmd->tvc_sgl[i]));
+		for (i = 0; i < cmd->tvc_sgl_count; i++) {
+			struct page *page = sg_page(&cmd->tvc_sgl[i]);
+			if (page)
+				put_page(page);
+		}
 		cmd->tvc_sgl_count = 0;
 		return ret;
 	}
@@ -905,9 +907,11 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
 	ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
 				    cmd->tvc_upages, write);
 	if (ret < 0) {
-		for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
-			put_page(sg_page(&cmd->tvc_prot_sgl[i]));
+		for (i = 0; i < cmd->tvc_prot_sgl_count; i++) {
+			struct page *page = sg_page(&cmd->tvc_prot_sgl[i]);
+			if (page)
+				put_page(page);
+		}
 		cmd->tvc_prot_sgl_count = 0;
 		return ret;
 	}
@@ -1065,12 +1069,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		if (unlikely(vq->iov[0].iov_len < req_size)) {
 			pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
 			       req_size, vq->iov[0].iov_len);
-			break;
+			vhost_scsi_send_bad_target(vs, vq, head, out);
+			continue;
 		}
 		ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
 		if (unlikely(ret)) {
 			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
-			break;
+			vhost_scsi_send_bad_target(vs, vq, head, out);
+			continue;
 		}
 
 		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
@@ -1101,14 +1107,16 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			if (data_direction != DMA_TO_DEVICE) {
 				vq_err(vq, "Received non zero do_pi_niov"
 					", but wrong data_direction\n");
-				goto err_cmd;
+				vhost_scsi_send_bad_target(vs, vq, head, out);
+				continue;
 			}
 			prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
 		} else if (v_req_pi.pi_bytesin) {
 			if (data_direction != DMA_FROM_DEVICE) {
 				vq_err(vq, "Received non zero di_pi_niov"
 					", but wrong data_direction\n");
-				goto err_cmd;
+				vhost_scsi_send_bad_target(vs, vq, head, out);
+				continue;
 			}
 			prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
 		}
@@ -1148,7 +1156,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			vq_err(vq, "Received SCSI CDB with command_size: %d that"
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 				scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
-			goto err_cmd;
+			vhost_scsi_send_bad_target(vs, vq, head, out);
+			continue;
 		}
 
 		cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
@@ -1157,7 +1166,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		if (IS_ERR(cmd)) {
 			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
 			       PTR_ERR(cmd));
-			goto err_cmd;
+			vhost_scsi_send_bad_target(vs, vq, head, out);
+			continue;
 		}
 
 		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
@@ -1178,7 +1188,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to"
 					" prot_sgl\n");
-				goto err_free;
+				tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
+				vhost_scsi_send_bad_target(vs, vq, head, out);
+				continue;
 			}
 		}
 		if (data_direction != DMA_NONE) {
@@ -1187,7 +1199,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 				data_direction == DMA_FROM_DEVICE);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				goto err_free;
+				tcm_vhost_release_cmd(&cmd->tvc_se_cmd);
+				vhost_scsi_send_bad_target(vs, vq, head, out);
+				continue;
 			}
 		}
 		/*
@@ -1205,14 +1219,6 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 		INIT_WORK(&cmd->work, tcm_vhost_submission_work);
 		queue_work(tcm_vhost_workqueue, &cmd->work);
 	}
-
-	mutex_unlock(&vq->mutex);
-	return;
-
-err_free:
-	vhost_scsi_free_cmd(cmd);
-err_cmd:
-	vhost_scsi_send_bad_target(vs, vq, head, out);
 out:
 	mutex_unlock(&vq->mutex);
 }