SCSI misc on 20200613
This is the set of changes collected since just before the merge window
opened. It's mostly minor fixes in drivers. The one non-driver set is the
three optical disk (sr) changes, where two are error path fixes and one is
a helper conversion. The big driver change is the hpsa
compat_alloc_user_space() rework by Al so he can kill the remaining user.
This has been tested and acked by the maintainer.

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----
iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCXuTsoCYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishc1zAP9yJpct
+Lrac+htBQQ41bAiayPFJ3qj4HtwC4TE4l5DmgD9EbaoJkRtl/F5NP8knzUQ5+wQ
k0GG1Vriyj/2um75ezo=
=PVTc
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "This is the set of changes collected since just before the merge window
  opened. It's mostly minor fixes in drivers. The one non-driver set is
  the three optical disk (sr) changes, where two are error path fixes and
  one is a helper conversion. The big driver change is the hpsa
  compat_alloc_user_space() rework by Al so he can kill the remaining
  user. This has been tested and acked by the maintainer"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (21 commits)
  scsi: acornscsi: Fix an error handling path in acornscsi_probe()
  scsi: storvsc: Remove memset before memory freeing in storvsc_suspend()
  scsi: cxlflash: Remove an unnecessary NULL check
  scsi: ibmvscsi: Don't send host info in adapter info MAD after LPM
  scsi: sr: Fix sr_probe() missing deallocate of device minor
  scsi: sr: Fix sr_probe() missing mutex_destroy
  scsi: st: Convert convert get_user_pages() --> pin_user_pages()
  scsi: target: Rename target_setup_cmd_from_cdb() to target_cmd_parse_cdb()
  scsi: target: Fix NULL pointer dereference
  scsi: target: Initialize LUN in transport_init_se_cmd()
  scsi: target: Factor out a new helper, target_cmd_init_cdb()
  scsi: hpsa: hpsa_ioctl(): Tidy up a bit
  scsi: hpsa: Get rid of compat_alloc_user_space()
  scsi: hpsa: Don't bother with vmalloc for BIG_IOCTL_Command_struct
  scsi: hpsa: Lift {BIG_,}IOCTL_Command_struct copy{in,out} into hpsa_ioctl()
  scsi: ufs: Remove redundant urgent_bkop_lvl initialization
  scsi: ufs: Don't update urgent bkops level when toggling auto bkops
  scsi: qedf: Remove redundant initialization of variable rc
  scsi: mpt3sas: Fix memset() in non-RDPQ mode
  scsi: iscsi: Fix reference count leak in iscsi_boot_create_kobj
  ...
commit 3df83e164f
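For context on the hpsa series below: compat_alloc_user_space() builds a 64-bit copy of the ioctl argument on the user stack, and the rework removes its last in-tree user by doing the 32-bit-to-native conversion in kernel memory and calling a handler that takes a kernel-space struct. A minimal sketch of that pattern, with made-up foo_* names rather than the hpsa code:

#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/types.h>

/* Hypothetical layouts; only the trailing pointer field differs. */
struct foo_cmd32 {
        __u32 len;
        compat_uptr_t buf;              /* 32-bit user pointer */
};

struct foo_cmd {
        __u32 len;
        void __user *buf;               /* native user pointer */
};

static int foo_do_ioctl(struct foo_cmd *cmd)
{
        return 0;                       /* stand-in for the real work */
}

static int foo_compat_ioctl(void __user *arg)
{
        struct foo_cmd32 __user *arg32 = arg;
        struct foo_cmd cmd;
        u32 uptr;

        /* Copy the leading fields, which have the same layout in both structs. */
        if (copy_from_user(&cmd, arg32, offsetof(struct foo_cmd32, buf)))
                return -EFAULT;
        /* Widen the 32-bit user pointer in kernel memory. */
        if (get_user(uptr, &arg32->buf))
                return -EFAULT;
        cmd.buf = compat_ptr(uptr);

        return foo_do_ioctl(&cmd);      /* native handler takes a kernel struct */
}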
@@ -2911,8 +2911,10 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 
         ashost->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
         ashost->fast = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
-        if (!ashost->base || !ashost->fast)
+        if (!ashost->base || !ashost->fast) {
+                ret = -ENOMEM;
                 goto out_put;
+        }
 
         host->irq = ec->irq;
         ashost->host = host;
@@ -47,9 +47,6 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
         struct sisl_ioasa *ioasa;
         u32 resid;
 
-        if (unlikely(!cmd))
-                return;
-
         ioasa = &(cmd->sa);
 
         if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
@@ -254,6 +254,10 @@ static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
 static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
                       void __user *arg);
+static int hpsa_passthru_ioctl(struct ctlr_info *h,
+                               IOCTL_Command_struct *iocommand);
+static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
+                                   BIG_IOCTL_Command_struct *ioc);
 
 #ifdef CONFIG_COMPAT
 static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
@@ -6217,75 +6221,63 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
 static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
         void __user *arg)
 {
-        IOCTL32_Command_struct __user *arg32 =
-            (IOCTL32_Command_struct __user *) arg;
+        struct ctlr_info *h = sdev_to_hba(dev);
+        IOCTL32_Command_struct __user *arg32 = arg;
         IOCTL_Command_struct arg64;
-        IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
         int err;
         u32 cp;
 
         if (!arg)
                 return -EINVAL;
 
         memset(&arg64, 0, sizeof(arg64));
-        err = 0;
-        err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
-                           sizeof(arg64.LUN_info));
-        err |= copy_from_user(&arg64.Request, &arg32->Request,
-                           sizeof(arg64.Request));
-        err |= copy_from_user(&arg64.error_info, &arg32->error_info,
-                           sizeof(arg64.error_info));
-        err |= get_user(arg64.buf_size, &arg32->buf_size);
-        err |= get_user(cp, &arg32->buf);
-        arg64.buf = compat_ptr(cp);
-        err |= copy_to_user(p, &arg64, sizeof(arg64));
-
-        if (err)
+        if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
                 return -EFAULT;
+        if (get_user(cp, &arg32->buf))
+                return -EFAULT;
+        arg64.buf = compat_ptr(cp);
 
-        err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
+        if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
+                return -EAGAIN;
+        err = hpsa_passthru_ioctl(h, &arg64);
+        atomic_inc(&h->passthru_cmds_avail);
         if (err)
                 return err;
-        err |= copy_in_user(&arg32->error_info, &p->error_info,
-                         sizeof(arg32->error_info));
-        if (err)
+        if (copy_to_user(&arg32->error_info, &arg64.error_info,
+                         sizeof(arg32->error_info)))
                 return -EFAULT;
-        return err;
+        return 0;
 }
 
 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
         unsigned int cmd, void __user *arg)
 {
-        BIG_IOCTL32_Command_struct __user *arg32 =
-            (BIG_IOCTL32_Command_struct __user *) arg;
+        struct ctlr_info *h = sdev_to_hba(dev);
+        BIG_IOCTL32_Command_struct __user *arg32 = arg;
         BIG_IOCTL_Command_struct arg64;
-        BIG_IOCTL_Command_struct __user *p =
-            compat_alloc_user_space(sizeof(arg64));
         int err;
         u32 cp;
 
         if (!arg)
                 return -EINVAL;
         memset(&arg64, 0, sizeof(arg64));
-        err = 0;
-        err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
-                           sizeof(arg64.LUN_info));
-        err |= copy_from_user(&arg64.Request, &arg32->Request,
-                           sizeof(arg64.Request));
-        err |= copy_from_user(&arg64.error_info, &arg32->error_info,
-                           sizeof(arg64.error_info));
-        err |= get_user(arg64.buf_size, &arg32->buf_size);
-        err |= get_user(arg64.malloc_size, &arg32->malloc_size);
-        err |= get_user(cp, &arg32->buf);
-        arg64.buf = compat_ptr(cp);
-        err |= copy_to_user(p, &arg64, sizeof(arg64));
-
-        if (err)
+        if (copy_from_user(&arg64, arg32,
+                           offsetof(BIG_IOCTL32_Command_struct, buf)))
                 return -EFAULT;
+        if (get_user(cp, &arg32->buf))
+                return -EFAULT;
+        arg64.buf = compat_ptr(cp);
 
-        err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
+        if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
+                return -EAGAIN;
+        err = hpsa_big_passthru_ioctl(h, &arg64);
+        atomic_inc(&h->passthru_cmds_avail);
         if (err)
                 return err;
-        err |= copy_in_user(&arg32->error_info, &p->error_info,
-                         sizeof(arg32->error_info));
-        if (err)
+        if (copy_to_user(&arg32->error_info, &arg64.error_info,
+                         sizeof(arg32->error_info)))
                 return -EFAULT;
-        return err;
+        return 0;
 }
 
 static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
@@ -6358,37 +6350,33 @@ static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
         return 0;
 }
 
-static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+static int hpsa_passthru_ioctl(struct ctlr_info *h,
+                               IOCTL_Command_struct *iocommand)
 {
-        IOCTL_Command_struct iocommand;
         struct CommandList *c;
         char *buff = NULL;
         u64 temp64;
         int rc = 0;
 
-        if (!argp)
-                return -EINVAL;
         if (!capable(CAP_SYS_RAWIO))
                 return -EPERM;
-        if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
-                return -EFAULT;
-        if ((iocommand.buf_size < 1) &&
-            (iocommand.Request.Type.Direction != XFER_NONE)) {
+        if ((iocommand->buf_size < 1) &&
+            (iocommand->Request.Type.Direction != XFER_NONE)) {
                 return -EINVAL;
         }
-        if (iocommand.buf_size > 0) {
-                buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+        if (iocommand->buf_size > 0) {
+                buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
                 if (buff == NULL)
                         return -ENOMEM;
-                if (iocommand.Request.Type.Direction & XFER_WRITE) {
+                if (iocommand->Request.Type.Direction & XFER_WRITE) {
                         /* Copy the data into the buffer we created */
-                        if (copy_from_user(buff, iocommand.buf,
-                                iocommand.buf_size)) {
+                        if (copy_from_user(buff, iocommand->buf,
+                                iocommand->buf_size)) {
                                 rc = -EFAULT;
                                 goto out_kfree;
                         }
                 } else {
-                        memset(buff, 0, iocommand.buf_size);
+                        memset(buff, 0, iocommand->buf_size);
                 }
         }
         c = cmd_alloc(h);
@@ -6398,23 +6386,23 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
         c->scsi_cmd = SCSI_CMD_BUSY;
         /* Fill in Command Header */
         c->Header.ReplyQueue = 0; /* unused in simple mode */
-        if (iocommand.buf_size > 0) {   /* buffer to fill */
+        if (iocommand->buf_size > 0) {  /* buffer to fill */
                 c->Header.SGList = 1;
                 c->Header.SGTotal = cpu_to_le16(1);
         } else { /* no buffers to fill */
                 c->Header.SGList = 0;
                 c->Header.SGTotal = cpu_to_le16(0);
         }
-        memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
+        memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN));
 
         /* Fill in Request block */
-        memcpy(&c->Request, &iocommand.Request,
+        memcpy(&c->Request, &iocommand->Request,
                 sizeof(c->Request));
 
         /* Fill in the scatter gather information */
-        if (iocommand.buf_size > 0) {
+        if (iocommand->buf_size > 0) {
                 temp64 = dma_map_single(&h->pdev->dev, buff,
-                        iocommand.buf_size, DMA_BIDIRECTIONAL);
+                        iocommand->buf_size, DMA_BIDIRECTIONAL);
                 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
                         c->SG[0].Addr = cpu_to_le64(0);
                         c->SG[0].Len = cpu_to_le32(0);
@@ -6422,12 +6410,12 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
                         goto out;
                 }
                 c->SG[0].Addr = cpu_to_le64(temp64);
-                c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
+                c->SG[0].Len = cpu_to_le32(iocommand->buf_size);
                 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
         }
         rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
                                         NO_TIMEOUT);
-        if (iocommand.buf_size > 0)
+        if (iocommand->buf_size > 0)
                 hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
         check_ioctl_unit_attention(h, c);
         if (rc) {
@@ -6436,16 +6424,12 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
         }
 
         /* Copy the error information out */
-        memcpy(&iocommand.error_info, c->err_info,
-                sizeof(iocommand.error_info));
-        if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
-                rc = -EFAULT;
-                goto out;
-        }
-        if ((iocommand.Request.Type.Direction & XFER_READ) &&
-                iocommand.buf_size > 0) {
+        memcpy(&iocommand->error_info, c->err_info,
+                sizeof(iocommand->error_info));
+        if ((iocommand->Request.Type.Direction & XFER_READ) &&
+                iocommand->buf_size > 0) {
                 /* Copy the data out of the buffer we created */
-                if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
+                if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) {
                         rc = -EFAULT;
                         goto out;
                 }
@@ -6457,9 +6441,9 @@ out_kfree:
         return rc;
 }
 
-static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
+                                   BIG_IOCTL_Command_struct *ioc)
 {
-        BIG_IOCTL_Command_struct *ioc;
         struct CommandList *c;
         unsigned char **buff = NULL;
         int *buff_size = NULL;
@@ -6470,29 +6454,17 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
         u32 sz;
         BYTE __user *data_ptr;
 
-        if (!argp)
-                return -EINVAL;
         if (!capable(CAP_SYS_RAWIO))
                 return -EPERM;
-        ioc = vmemdup_user(argp, sizeof(*ioc));
-        if (IS_ERR(ioc)) {
-                status = PTR_ERR(ioc);
-                goto cleanup1;
-        }
+
         if ((ioc->buf_size < 1) &&
-            (ioc->Request.Type.Direction != XFER_NONE)) {
-                status = -EINVAL;
-                goto cleanup1;
-        }
+            (ioc->Request.Type.Direction != XFER_NONE))
+                return -EINVAL;
         /* Check kmalloc limits using all SGs */
-        if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
-                status = -EINVAL;
-                goto cleanup1;
-        }
-        if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
-                status = -EINVAL;
-                goto cleanup1;
-        }
+        if (ioc->malloc_size > MAX_KMALLOC_SIZE)
+                return -EINVAL;
+        if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD)
+                return -EINVAL;
         buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
         if (!buff) {
                 status = -ENOMEM;
@@ -6565,10 +6537,6 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 
         /* Copy the error information out */
         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
-        if (copy_to_user(argp, ioc, sizeof(*ioc))) {
-                status = -EFAULT;
-                goto cleanup0;
-        }
         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
                 int i;
 
@@ -6594,7 +6562,6 @@ cleanup1:
                 kfree(buff);
         }
         kfree(buff_size);
-        kvfree(ioc);
         return status;
 }
 
@@ -6610,14 +6577,11 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
  * ioctl
  */
 static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
-                      void __user *arg)
+                      void __user *argp)
 {
-        struct ctlr_info *h;
-        void __user *argp = (void __user *)arg;
+        struct ctlr_info *h = sdev_to_hba(dev);
         int rc;
 
-        h = sdev_to_hba(dev);
-
         switch (cmd) {
         case CCISS_DEREGDISK:
         case CCISS_REGNEWDISK:
@@ -6628,18 +6592,35 @@ static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
                 return hpsa_getpciinfo_ioctl(h, argp);
         case CCISS_GETDRIVVER:
                 return hpsa_getdrivver_ioctl(h, argp);
-        case CCISS_PASSTHRU:
+        case CCISS_PASSTHRU: {
+                IOCTL_Command_struct iocommand;
+
+                if (!argp)
+                        return -EINVAL;
+                if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
+                        return -EFAULT;
                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
                         return -EAGAIN;
-                rc = hpsa_passthru_ioctl(h, argp);
+                rc = hpsa_passthru_ioctl(h, &iocommand);
                 atomic_inc(&h->passthru_cmds_avail);
+                if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
+                        rc = -EFAULT;
                 return rc;
-        case CCISS_BIG_PASSTHRU:
+        }
+        case CCISS_BIG_PASSTHRU: {
+                BIG_IOCTL_Command_struct ioc;
+
+                if (!argp)
+                        return -EINVAL;
+                if (copy_from_user(&ioc, argp, sizeof(ioc)))
+                        return -EFAULT;
                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
                         return -EAGAIN;
-                rc = hpsa_big_passthru_ioctl(h, argp);
+                rc = hpsa_big_passthru_ioctl(h, &ioc);
                 atomic_inc(&h->passthru_cmds_avail);
+                if (!rc && copy_to_user(argp, &ioc, sizeof(ioc)))
+                        rc = -EFAULT;
+                return rc;
+        }
         default:
                 return -ENOTTY;
         }
@@ -415,6 +415,8 @@ static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
         int rc = 0;
         struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 
+        set_adapter_info(hostdata);
+
         /* Re-enable the CRQ */
         do {
                 if (rc)
@@ -352,7 +352,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
         boot_kobj->kobj.kset = boot_kset->kset;
         if (kobject_init_and_add(&boot_kobj->kobj, &iscsi_boot_ktype,
                                  NULL, name, index)) {
-                kfree(boot_kobj);
+                kobject_put(&boot_kobj->kobj);
                 return NULL;
         }
         boot_kobj->data = data;
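The iscsi_boot_sysfs fix follows the kobject rule that once kobject_init_and_add() has been called, cleanup must go through kobject_put() so the ktype's release() frees the object; freeing it directly with kfree() leaks the reference state set up by kobject_init(). A small sketch of that pattern with a hypothetical my_obj type (not the iscsi_boot_sysfs code):

#include <linux/kobject.h>
#include <linux/slab.h>

struct my_obj {
        struct kobject kobj;
        int data;
};

/* release() is the only place the containing object is freed */
static void my_obj_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct my_obj, kobj));
}

static struct kobj_type my_ktype = {
        .release = my_obj_release,
};

static struct my_obj *my_obj_create(struct kobject *parent, int id)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        if (kobject_init_and_add(&obj->kobj, &my_ktype, parent, "obj%d", id)) {
                kobject_put(&obj->kobj);        /* frees obj via release() */
                return NULL;
        }
        return obj;
}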
@@ -4984,7 +4984,7 @@ base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
         for (i = 0; i < count; i++) {
                 if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
                         ioc->reply_post[i].reply_post_free =
-                            dma_pool_alloc(ioc->reply_post_free_dma_pool,
+                            dma_pool_zalloc(ioc->reply_post_free_dma_pool,
                                 GFP_KERNEL,
                                 &ioc->reply_post[i].reply_post_free_dma);
                         if (!ioc->reply_post[i].reply_post_free)
@@ -5008,9 +5008,6 @@ base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
                                     ioc->reply_post[i].reply_post_free_dma));
                                 return -EAGAIN;
                         }
-                        memset(ioc->reply_post[i].reply_post_free, 0,
-                            RDPQ_MAX_INDEX_IN_ONE_CHUNK *
-                            reply_post_free_sz);
                         dma_alloc_count--;
 
                 } else {
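The mpt3sas hunks replace dma_pool_alloc() plus a later memset() with dma_pool_zalloc(), so the zeroing always matches the allocation size regardless of RDPQ mode. A one-call sketch (hypothetical names, not the driver code):

#include <linux/dmapool.h>

/* Returns zeroed memory of the pool's block size, or NULL on failure. */
static void *demo_alloc_reply_post(struct dma_pool *pool, dma_addr_t *dma)
{
        return dma_pool_zalloc(pool, GFP_KERNEL, dma);
}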
@@ -20,7 +20,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
 #define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
         static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
         unsigned long flags = 0;
-        int rc = -1;
+        int rc;
 
         skb = dev_alloc_skb(sizeof(struct fip_vlan));
         if (!skb) {
@@ -792,7 +792,7 @@ static int sr_probe(struct device *dev)
         disk->queue = sdev->request_queue;
 
         if (register_cdrom(disk, &cd->cdi))
-                goto fail_put;
+                goto fail_minor;
 
         /*
          * Initialize block layer runtime PM stuffs before the
@@ -810,8 +810,13 @@ static int sr_probe(struct device *dev)
 
         return 0;
 
+fail_minor:
+        spin_lock(&sr_index_lock);
+        clear_bit(minor, sr_index_bits);
+        spin_unlock(&sr_index_lock);
 fail_put:
         put_disk(disk);
+        mutex_destroy(&cd->lock);
 fail_free:
         kfree(cd);
 fail:
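The two sr_probe() hunks extend the unwind path: the new fail_minor label releases the reserved minor bit and mutex_destroy() is added before the cd structure is freed. The underlying idiom, shown here as a generic sketch with hypothetical demo_* helpers rather than the sr code, is to order the labels so each failure point releases exactly what was set up before it:

static int demo_alloc_minor(int *minor) { *minor = 0; return 0; }
static int demo_register(int minor) { return 0; }
static int demo_start(void) { return 0; }
static void demo_unregister(int minor) { }
static void demo_free_minor(int minor) { }

static int demo_probe(void)
{
        int minor, ret;

        ret = demo_alloc_minor(&minor);
        if (ret)
                goto fail;

        ret = demo_register(minor);
        if (ret)
                goto fail_minor;

        ret = demo_start();
        if (ret)
                goto fail_unregister;

        return 0;

        /* unwind in reverse order of setup */
fail_unregister:
        demo_unregister(minor);
fail_minor:
        demo_free_minor(minor);
fail:
        return ret;
}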
@@ -4921,7 +4921,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
         unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
         unsigned long start = uaddr >> PAGE_SHIFT;
         const int nr_pages = end - start;
-        int res, i, j;
+        int res, i;
         struct page **pages;
         struct rq_map_data *mdata = &STbp->map_data;
 
@@ -4943,7 +4943,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 
         /* Try to fault in all of the necessary pages */
         /* rw==READ means read from drive, write into memory area */
-        res = get_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
+        res = pin_user_pages_fast(uaddr, nr_pages, rw == READ ? FOLL_WRITE : 0,
                                   pages);
 
         /* Errors and no page mapped should return here */
@@ -4963,8 +4963,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
         return nr_pages;
  out_unmap:
         if (res > 0) {
-                for (j=0; j < res; j++)
-                        put_page(pages[j]);
+                unpin_user_pages(pages, res);
                 res = 0;
         }
         kfree(pages);
@@ -4976,18 +4975,9 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
 static int sgl_unmap_user_pages(struct st_buffer *STbp,
                                 const unsigned int nr_pages, int dirtied)
 {
-        int i;
+        /* FIXME: cache flush missing for rw==READ */
+        unpin_user_pages_dirty_lock(STbp->mapped_pages, nr_pages, dirtied);
 
-        for (i=0; i < nr_pages; i++) {
-                struct page *page = STbp->mapped_pages[i];
-
-                if (dirtied)
-                        SetPageDirty(page);
-                /* FIXME: cache flush missing for rw==READ
-                 * FIXME: call the correct reference counting function
-                 */
-                put_page(page);
-        }
         kfree(STbp->mapped_pages);
         STbp->mapped_pages = NULL;
 
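The st conversion above is the standard migration from get_user_pages_fast()/put_page() to pin_user_pages_fast()/unpin_user_pages*() for pages a driver uses as I/O buffers. A condensed sketch of the pairing (not the st code; assumes the caller sized the pages[] array):

#include <linux/mm.h>
#include <linux/errno.h>

/* Pages the device will write (a read from the medium) need FOLL_WRITE. */
static int demo_pin(unsigned long uaddr, int nr_pages, bool write,
                    struct page **pages)
{
        int pinned;

        pinned = pin_user_pages_fast(uaddr, nr_pages,
                                     write ? FOLL_WRITE : 0, pages);
        if (pinned < nr_pages) {
                if (pinned > 0)
                        unpin_user_pages(pages, pinned); /* partial pin */
                return -ENOMEM;
        }
        return 0;
}

static void demo_unpin(struct page **pages, int nr_pages, bool dirtied)
{
        /* replaces the old SetPageDirty() + put_page() loop */
        unpin_user_pages_dirty_lock(pages, nr_pages, dirtied);
}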
@@ -2035,9 +2035,6 @@ static int storvsc_suspend(struct hv_device *hv_dev)
 
         vmbus_close(hv_dev->channel);
 
-        memset(stor_device->stor_chns, 0,
-               num_possible_cpus() * sizeof(void *));
-
         kfree(stor_device->stor_chns);
         stor_device->stor_chns = NULL;
 
@@ -5076,6 +5076,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
                 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
                 ufshcd_disable_auto_bkops(hba);
         }
+        hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
         hba->is_urgent_bkops_lvl_checked = false;
 }
 
@@ -5123,7 +5124,6 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
                 err = ufshcd_enable_auto_bkops(hba);
         else
                 err = ufshcd_disable_auto_bkops(hba);
-        hba->urgent_bkops_lvl = curr_status;
 out:
         return err;
 }
@@ -7373,10 +7373,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
         if (ret)
                 goto out;
 
-        /* set the default level for urgent bkops */
-        hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
-        hba->is_urgent_bkops_lvl_checked = false;
-
         /* Debug counters initialization */
         ufshcd_clear_dbg_ufs_stats(hba);
 
@@ -1158,7 +1158,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
                         conn->sess->se_sess, be32_to_cpu(hdr->data_length),
                         cmd->data_direction, sam_task_attr,
-                        cmd->sense_buffer + 2);
+                        cmd->sense_buffer + 2, scsilun_to_int(&hdr->lun));
 
         pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
                 " ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
@@ -1167,22 +1167,25 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
         target_get_sess_cmd(&cmd->se_cmd, true);
 
-        cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
-                                                     scsilun_to_int(&hdr->lun));
+        cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
+        if (cmd->sense_reason) {
+                if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
+                        return iscsit_add_reject_cmd(cmd,
+                                        ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+                }
+
+                goto attach_cmd;
+        }
+
+        cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
         if (cmd->sense_reason)
                 goto attach_cmd;
 
         /* only used for printks or comparing with ->ref_task_tag */
         cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
-        cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
-        if (cmd->sense_reason) {
-                if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
-                        return iscsit_add_reject_cmd(cmd,
-                                        ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
-                }
-
+        cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
+        if (cmd->sense_reason)
                 goto attach_cmd;
-        }
 
         if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
                 return iscsit_add_reject_cmd(cmd,
@@ -2000,7 +2003,8 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
         transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
                               conn->sess->se_sess, 0, DMA_NONE,
-                              TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+                              TCM_SIMPLE_TAG, cmd->sense_buffer + 2,
+                              scsilun_to_int(&hdr->lun));
 
         target_get_sess_cmd(&cmd->se_cmd, true);
 
@@ -2038,8 +2042,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
          * Locate the struct se_lun for all TMRs not related to ERL=2 TASK_REASSIGN
          */
         if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
-                ret = transport_lookup_tmr_lun(&cmd->se_cmd,
-                                               scsilun_to_int(&hdr->lun));
+                ret = transport_lookup_tmr_lun(&cmd->se_cmd);
                 if (ret < 0) {
                         se_tmr->response = ISCSI_TMF_RSP_NO_LUN;
                         goto attach;
@@ -45,7 +45,7 @@ static struct se_hba *lun0_hba;
 struct se_device *g_lun0_dev;
 
 sense_reason_t
-transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
+transport_lookup_cmd_lun(struct se_cmd *se_cmd)
 {
         struct se_lun *se_lun = NULL;
         struct se_session *se_sess = se_cmd->se_sess;
@@ -54,7 +54,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
         sense_reason_t ret = TCM_NO_SENSE;
 
         rcu_read_lock();
-        deve = target_nacl_find_deve(nacl, unpacked_lun);
+        deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
         if (deve) {
                 atomic_long_inc(&deve->total_cmds);
 
@@ -74,7 +74,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 
                 se_cmd->se_lun = se_lun;
                 se_cmd->pr_res_key = deve->pr_res_key;
-                se_cmd->orig_fe_lun = unpacked_lun;
                 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                 se_cmd->lun_ref_active = true;
 
@@ -83,7 +82,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
                         pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                 " Access for 0x%08llx\n",
                                 se_cmd->se_tfo->fabric_name,
-                                unpacked_lun);
+                                se_cmd->orig_fe_lun);
                         rcu_read_unlock();
                         ret = TCM_WRITE_PROTECTED;
                         goto ref_dev;
@@ -98,18 +97,17 @@ out_unlock:
                  * REPORT_LUNS, et al to be returned when no active
                  * MappedLUN=0 exists for this Initiator Port.
                  */
-                if (unpacked_lun != 0) {
+                if (se_cmd->orig_fe_lun != 0) {
                         pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                 " Access for 0x%08llx from %s\n",
                                 se_cmd->se_tfo->fabric_name,
-                                unpacked_lun,
+                                se_cmd->orig_fe_lun,
                                 nacl->initiatorname);
                         return TCM_NON_EXISTENT_LUN;
                 }
 
                 se_lun = se_sess->se_tpg->tpg_virt_lun0;
                 se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
-                se_cmd->orig_fe_lun = 0;
                 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 
                 percpu_ref_get(&se_lun->lun_ref);
@@ -145,7 +143,7 @@ ref_dev:
 }
 EXPORT_SYMBOL(transport_lookup_cmd_lun);
 
-int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
 {
         struct se_dev_entry *deve;
         struct se_lun *se_lun = NULL;
@@ -155,7 +153,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
         unsigned long flags;
 
         rcu_read_lock();
-        deve = target_nacl_find_deve(nacl, unpacked_lun);
+        deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
         if (deve) {
                 se_lun = rcu_dereference(deve->se_lun);
 
@@ -166,7 +164,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
 
                 se_cmd->se_lun = se_lun;
                 se_cmd->pr_res_key = deve->pr_res_key;
-                se_cmd->orig_fe_lun = unpacked_lun;
                 se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                 se_cmd->lun_ref_active = true;
         }
@@ -177,7 +174,7 @@ out_unlock:
                 pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                         " Access for 0x%08llx for %s\n",
                         se_cmd->se_tfo->fabric_name,
-                        unpacked_lun,
+                        se_cmd->orig_fe_lun,
                         nacl->initiatorname);
                 return -ENODEV;
         }
@@ -148,8 +148,8 @@ void core_tmr_abort_task(
                  * code.
                  */
                 if (!tmr->tmr_dev)
-                        WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd,
-                                        se_cmd->orig_fe_lun) < 0);
+                        WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) <
+                                     0);
 
                 target_put_cmd_and_wait(se_cmd);
 
@@ -1364,7 +1364,7 @@ void transport_init_se_cmd(
         u32 data_length,
         int data_direction,
         int task_attr,
-        unsigned char *sense_buffer)
+        unsigned char *sense_buffer, u64 unpacked_lun)
 {
         INIT_LIST_HEAD(&cmd->se_delayed_node);
         INIT_LIST_HEAD(&cmd->se_qf_node);
@@ -1383,6 +1383,7 @@ void transport_init_se_cmd(
         cmd->data_direction = data_direction;
         cmd->sam_task_attr = task_attr;
         cmd->sense_buffer = sense_buffer;
+        cmd->orig_fe_lun = unpacked_lun;
 
         cmd->state_active = false;
 }
@@ -1410,11 +1411,11 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
 }
 
 sense_reason_t
-target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
 {
-        struct se_device *dev = cmd->se_dev;
         sense_reason_t ret;
 
+        cmd->t_task_cdb = &cmd->__t_task_cdb[0];
         /*
          * Ensure that the received CDB is less than the max (252 + 8) bytes
          * for VARIABLE_LENGTH_CMD
@@ -1423,7 +1424,8 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
                 pr_err("Received SCSI CDB with command_size: %d that"
                         " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                         scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
-                return TCM_INVALID_CDB_FIELD;
+                ret = TCM_INVALID_CDB_FIELD;
+                goto err;
         }
         /*
          * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
@@ -1438,16 +1440,34 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
                                 " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                 scsi_command_size(cdb),
                                 (unsigned long)sizeof(cmd->__t_task_cdb));
-                        return TCM_OUT_OF_RESOURCES;
+                        ret = TCM_OUT_OF_RESOURCES;
+                        goto err;
                 }
-        } else
-                cmd->t_task_cdb = &cmd->__t_task_cdb[0];
+        }
         /*
          * Copy the original CDB into cmd->
          */
         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
 
         trace_target_sequencer_start(cmd);
+        return 0;
+
+err:
+        /*
+         * Copy the CDB here to allow trace_target_cmd_complete() to
+         * print the cdb to the trace buffers.
+         */
+        memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
+                                         (unsigned int)TCM_MAX_COMMAND_SIZE));
+        return ret;
+}
+EXPORT_SYMBOL(target_cmd_init_cdb);
+
+sense_reason_t
+target_cmd_parse_cdb(struct se_cmd *cmd)
+{
+        struct se_device *dev = cmd->se_dev;
+        sense_reason_t ret;
 
         ret = dev->transport->parse_cdb(cmd);
         if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
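Taken together, the target hunks split the old target_setup_cmd_from_cdb() into target_cmd_init_cdb() plus target_cmd_parse_cdb() and move the LUN into transport_init_se_cmd(), so transport_lookup_cmd_lun() and transport_lookup_tmr_lun() no longer take a LUN argument. A condensed sketch of the resulting call order, modelled on the target_submit_cmd_map_sgls() and iscsit hunks in this diff (error handling abbreviated; a real caller maps the sense_reason_t to a check condition rather than an errno):

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static int demo_submit(struct se_cmd *se_cmd, struct se_session *se_sess,
                       const struct target_core_fabric_ops *tfo,
                       unsigned char *cdb, unsigned char *sense,
                       u64 unpacked_lun, u32 data_length, int data_dir,
                       int task_attr)
{
        sense_reason_t rc;

        /* The LUN is now passed at init time and stored in orig_fe_lun. */
        transport_init_se_cmd(se_cmd, tfo, se_sess, data_length, data_dir,
                              task_attr, sense, unpacked_lun);

        rc = target_cmd_init_cdb(se_cmd, cdb);  /* set up the CDB buffer */
        if (rc)
                return -EINVAL;

        rc = transport_lookup_cmd_lun(se_cmd);  /* LUN comes from se_cmd now */
        if (rc)
                return -ENODEV;

        rc = target_cmd_parse_cdb(se_cmd);      /* backend parses the CDB */
        if (rc)
                return -EINVAL;

        return 0;
}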
@@ -1466,7 +1486,7 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
         atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
         return 0;
 }
-EXPORT_SYMBOL(target_setup_cmd_from_cdb);
+EXPORT_SYMBOL(target_cmd_parse_cdb);
 
 /*
  * Used by fabric module frontends to queue tasks directly.
@@ -1588,7 +1608,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
          * target_core_fabric_ops->queue_status() callback
          */
         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-                                data_length, data_dir, task_attr, sense);
+                                data_length, data_dir, task_attr, sense,
+                                unpacked_lun);
 
         if (flags & TARGET_SCF_USE_CPUID)
                 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
@@ -1611,17 +1632,25 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
          */
         if (flags & TARGET_SCF_BIDI_OP)
                 se_cmd->se_cmd_flags |= SCF_BIDI;
-        /*
-         * Locate se_lun pointer and attach it to struct se_cmd
-         */
-        rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+
+        rc = target_cmd_init_cdb(se_cmd, cdb);
         if (rc) {
                 transport_send_check_condition_and_sense(se_cmd, rc, 0);
                 target_put_sess_cmd(se_cmd);
                 return 0;
         }
 
-        rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+        /*
+         * Locate se_lun pointer and attach it to struct se_cmd
+         */
+        rc = transport_lookup_cmd_lun(se_cmd);
+        if (rc) {
+                transport_send_check_condition_and_sense(se_cmd, rc, 0);
+                target_put_sess_cmd(se_cmd);
+                return 0;
+        }
+
+        rc = target_cmd_parse_cdb(se_cmd);
         if (rc != 0) {
                 transport_generic_request_failure(se_cmd, rc);
                 return 0;
@@ -1782,7 +1811,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
         BUG_ON(!se_tpg);
 
         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-                              0, DMA_NONE, TCM_SIMPLE_TAG, sense);
+                              0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
         /*
          * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
          * allocation failure.
@@ -1810,7 +1839,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
                 goto failure;
         }
 
-        ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
+        ret = transport_lookup_tmr_lun(se_cmd);
         if (ret)
                 goto failure;
 
@@ -1007,7 +1007,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
                 entry->hdr.cmd_id = 0; /* not used for PAD */
                 entry->hdr.kflags = 0;
                 entry->hdr.uflags = 0;
-                tcmu_flush_dcache_range(entry, sizeof(*entry));
+                tcmu_flush_dcache_range(entry, sizeof(entry->hdr));
 
                 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
                 tcmu_flush_dcache_range(mb, sizeof(*mb));
@@ -1072,7 +1072,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
         cdb_off = CMDR_OFF + cmd_head + base_command_size;
         memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
         entry->req.cdb_off = cdb_off;
-        tcmu_flush_dcache_range(entry, sizeof(*entry));
+        tcmu_flush_dcache_range(entry, command_size);
 
         UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
         tcmu_flush_dcache_range(mb, sizeof(*mb));
@@ -526,8 +526,11 @@ static int target_xcopy_setup_pt_cmd(
         }
         cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 
+        if (target_cmd_init_cdb(cmd, cdb))
+                return -EINVAL;
+
         cmd->tag = 0;
-        if (target_setup_cmd_from_cdb(cmd, cdb))
+        if (target_cmd_parse_cdb(cmd))
                 return -EINVAL;
 
         if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg,
@@ -585,7 +588,7 @@ static int target_xcopy_read_source(
                 (unsigned long long)src_lba, src_sectors, length);
 
         transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
-                              DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0]);
+                              DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
 
         rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
                                 remote_port);
@@ -630,7 +633,7 @@ static int target_xcopy_write_destination(
                 (unsigned long long)dst_lba, dst_sectors, length);
 
         transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
-                              DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0]);
+                              DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0], 0);
 
         rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
                                 remote_port);
@@ -1052,7 +1052,8 @@ static void usbg_cmd_work(struct work_struct *work)
                 transport_init_se_cmd(se_cmd,
                                 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
                                 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
-                                cmd->prio_attr, cmd->sense_iu.sense);
+                                cmd->prio_attr, cmd->sense_iu.sense,
+                                cmd->unpacked_lun);
                 goto out;
         }
 
@@ -1182,7 +1183,8 @@ static void bot_cmd_work(struct work_struct *work)
                 transport_init_se_cmd(se_cmd,
                                 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
                                 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
-                                cmd->prio_attr, cmd->sense_iu.sense);
+                                cmd->prio_attr, cmd->sense_iu.sense,
+                                cmd->unpacked_lun);
                 goto out;
         }
 
@@ -150,9 +150,10 @@ void transport_deregister_session(struct se_session *);
 
 void transport_init_se_cmd(struct se_cmd *,
                 const struct target_core_fabric_ops *,
-                struct se_session *, u32, int, int, unsigned char *);
-sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64);
-sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
+                struct se_session *, u32, int, int, unsigned char *, u64);
+sense_reason_t transport_lookup_cmd_lun(struct se_cmd *);
+sense_reason_t target_cmd_init_cdb(struct se_cmd *, unsigned char *);
+sense_reason_t target_cmd_parse_cdb(struct se_cmd *);
 int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *,
         unsigned char *, unsigned char *, u64, u32, int, int, int,
         struct scatterlist *, u32, struct scatterlist *, u32,
@@ -187,7 +188,7 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
 void core_tmr_release_req(struct se_tmr_req *);
 int transport_generic_handle_tmr(struct se_cmd *);
 void transport_generic_request_failure(struct se_cmd *, sense_reason_t);
-int transport_lookup_tmr_lun(struct se_cmd *, u64);
+int transport_lookup_tmr_lun(struct se_cmd *);
 void core_allocate_nexus_loss_ua(struct se_node_acl *acl);
 
 struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,