Merge branch '5.19/scsi-fixes' into 5.20/scsi-staging
Bring in fixes to resolve a merge conflict in the lpfc driver update.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Commit 11e50ed239
@@ -2782,6 +2782,7 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
 	struct hisi_hba *hisi_hba = shost_priv(shost);
 	struct device *dev = hisi_hba->dev;
 	int ret = sas_slave_configure(sdev);
+	unsigned int max_sectors;

 	if (ret)
 		return ret;
@@ -2799,6 +2800,12 @@ static int slave_configure_v3_hw(struct scsi_device *sdev)
 		}
 	}

+	/* Set according to IOMMU IOVA caching limit */
+	max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
+			    (PAGE_SIZE * 32) >> SECTOR_SHIFT);
+
+	blk_queue_max_hw_sectors(sdev->request_queue, max_sectors);
+
 	return 0;
 }

@@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

-static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
-static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);

 static const char *unknown_error = "unknown error";
@@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
 	unsigned long flags;

-	ibmvfc_release_sub_crqs(vhost);
+	ibmvfc_dereg_sub_crqs(vhost);

 	/* Re-enable the CRQ */
 	do {
@@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
 	spin_unlock(vhost->crq.q_lock);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);

-	ibmvfc_init_sub_crqs(vhost);
+	ibmvfc_reg_sub_crqs(vhost);

 	return rc;
 }
@@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
 	struct ibmvfc_queue *crq = &vhost->crq;

-	ibmvfc_release_sub_crqs(vhost);
+	ibmvfc_dereg_sub_crqs(vhost);

 	/* Close the CRQ */
 	do {
@@ -988,7 +988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 	spin_unlock(vhost->crq.q_lock);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);

-	ibmvfc_init_sub_crqs(vhost);
+	ibmvfc_reg_sub_crqs(vhost);

 	return rc;
 }
@@ -5682,6 +5682,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
 	queue->cur = 0;
 	queue->fmt = fmt;
 	queue->size = PAGE_SIZE / fmt_size;
+
+	queue->vhost = vhost;
 	return 0;
 }
@@ -5757,9 +5759,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,

 	ENTER;

-	if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
-		return -ENOMEM;
-
 	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
 			   &scrq->cookie, &scrq->hw_irq);

@@ -5790,7 +5789,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
 	}

 	scrq->hwq_id = index;
-	scrq->vhost = vhost;

 	LEAVE;
 	return 0;
@@ -5800,7 +5798,6 @@ irq_failed:
 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
 	} while (rtas_busy_delay(rc));
 reg_failed:
-	ibmvfc_free_queue(vhost, scrq);
 	LEAVE;
 	return rc;
 }
@@ -5826,12 +5823,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
 	if (rc)
 		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);

-	ibmvfc_free_queue(vhost, scrq);
+	/* Clean out the queue */
+	memset(scrq->msgs.crq, 0, PAGE_SIZE);
+	scrq->cur = 0;

 	LEAVE;
 }

+static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
+{
+	int i, j;
+
+	ENTER;
+	if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+		return;
+
+	for (i = 0; i < nr_scsi_hw_queues; i++) {
+		if (ibmvfc_register_scsi_channel(vhost, i)) {
+			for (j = i; j > 0; j--)
+				ibmvfc_deregister_scsi_channel(vhost, j - 1);
+			vhost->do_enquiry = 0;
+			return;
+		}
+	}
+
+	LEAVE;
+}
+
+static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
+{
+	int i;
+
+	ENTER;
+	if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
+		return;
+
+	for (i = 0; i < nr_scsi_hw_queues; i++)
+		ibmvfc_deregister_scsi_channel(vhost, i);
+
+	LEAVE;
+}
+
 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
 {
 	struct ibmvfc_queue *scrq;
 	int i, j;

 	ENTER;
@@ -5847,30 +5882,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
 	}

 	for (i = 0; i < nr_scsi_hw_queues; i++) {
-		if (ibmvfc_register_scsi_channel(vhost, i)) {
-			for (j = i; j > 0; j--)
-				ibmvfc_deregister_scsi_channel(vhost, j - 1);
+		scrq = &vhost->scsi_scrqs.scrqs[i];
+		if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
+			for (j = i; j > 0; j--) {
+				scrq = &vhost->scsi_scrqs.scrqs[j - 1];
+				ibmvfc_free_queue(vhost, scrq);
+			}
 			kfree(vhost->scsi_scrqs.scrqs);
 			vhost->scsi_scrqs.scrqs = NULL;
 			vhost->scsi_scrqs.active_queues = 0;
 			vhost->do_enquiry = 0;
-			break;
+			vhost->mq_enabled = 0;
+			return;
 		}
 	}

+	ibmvfc_reg_sub_crqs(vhost);
+
 	LEAVE;
 }

 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
 {
 	struct ibmvfc_queue *scrq;
 	int i;

 	ENTER;
 	if (!vhost->scsi_scrqs.scrqs)
 		return;

-	for (i = 0; i < nr_scsi_hw_queues; i++)
-		ibmvfc_deregister_scsi_channel(vhost, i);
+	ibmvfc_dereg_sub_crqs(vhost);

+	for (i = 0; i < nr_scsi_hw_queues; i++) {
+		scrq = &vhost->scsi_scrqs.scrqs[i];
+		ibmvfc_free_queue(vhost, scrq);
+	}
+
 	kfree(vhost->scsi_scrqs.scrqs);
 	vhost->scsi_scrqs.scrqs = NULL;

@@ -789,6 +789,7 @@ struct ibmvfc_queue {
 	spinlock_t _lock;
 	spinlock_t *q_lock;

+	struct ibmvfc_host *vhost;
 	struct ibmvfc_event_pool evt_pool;
 	struct list_head sent;
 	struct list_head free;
@@ -797,7 +798,6 @@ struct ibmvfc_queue {
 	union ibmvfc_iu cancel_rsp;

 	/* Sub-CRQ fields */
-	struct ibmvfc_host *vhost;
 	unsigned long cookie;
 	unsigned long vios_cookie;
 	unsigned long hw_irq;

@@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
						       GFP_KERNEL);

 		if (!ioa_cfg->hrrq[i].host_rrq) {
-			while (--i > 0)
+			while (--i >= 0)
 				dma_free_coherent(&pdev->dev,
 					sizeof(u32) * ioa_cfg->hrrq[i].size,
 					ioa_cfg->hrrq[i].host_rrq,
@@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
 			ioa_cfg->vectors_info[i].desc,
 			&ioa_cfg->hrrq[i]);
 		if (rc) {
-			while (--i >= 0)
+			while (--i > 0)
 				free_irq(pci_irq_vector(pdev, i),
 					&ioa_cfg->hrrq[i]);
 			return rc;

@@ -420,8 +420,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
			     uint32_t);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
			     struct lpfc_iocbq *);
-void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i,
-			      struct lpfc_wcqe_complete *w);

 void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
@@ -630,7 +628,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba,
				struct lpfc_nodelist *ndlp);
 void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdiocb,
-				struct lpfc_wcqe_complete *abts_cmpl);
+				struct lpfc_iocbq *rspiocb);
 void lpfc_create_multixri_pools(struct lpfc_hba *phba);
 void lpfc_create_destroy_pools(struct lpfc_hba *phba);
 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid);

@@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
 	memset(bpl, 0, sizeof(struct ulp_bde64));
 	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
 	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
-	bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
+	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 	bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
 	bpl->tus.w = le32_to_cpu(bpl->tus.w);

@@ -2998,10 +2998,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			 ndlp->nlp_DID, ulp_status,
			 ulp_word4);

-	/* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */
 	if (lpfc_error_lost_link(ulp_status, ulp_word4)) {
-		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
-					NLP_EVT_DEVICE_RM);
 		skip_recovery = 1;
 		goto out;
 	}
@@ -3021,18 +3018,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		spin_unlock_irq(&ndlp->lock);
 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					NLP_EVT_DEVICE_RM);
-		lpfc_els_free_iocb(phba, cmdiocb);
-		lpfc_nlp_put(ndlp);
-
-		/* Presume the node was released. */
-		return;
+		goto out_rsrc_free;
 	}

 out:
-	/* Driver is done with the IO. */
-	lpfc_els_free_iocb(phba, cmdiocb);
-	lpfc_nlp_put(ndlp);
-
 	/* At this point, the LOGO processing is complete. NOTE: For a
	 * pt2pt topology, we are assuming the NPortID will only change
	 * on link up processing. For a LOGO / PLOGI initiated by the
@@ -3059,6 +3048,10 @@ out:
			 ndlp->nlp_DID, ulp_status,
			 ulp_word4, tmo,
			 vport->num_disc_nodes);
+
+		lpfc_els_free_iocb(phba, cmdiocb);
+		lpfc_nlp_put(ndlp);
+
 		lpfc_disc_start(vport);
 		return;
 	}
@@ -3075,6 +3068,10 @@ out:
 		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					NLP_EVT_DEVICE_RM);
 	}
+out_rsrc_free:
+	/* Driver is done with the I/O. */
+	lpfc_els_free_iocb(phba, cmdiocb);
+	lpfc_nlp_put(ndlp);
 }

 /**

@@ -4487,6 +4487,9 @@ struct wqe_common {
 #define wqe_sup_SHIFT 6
 #define wqe_sup_MASK 0x00000001
 #define wqe_sup_WORD word11
+#define wqe_ffrq_SHIFT 6
+#define wqe_ffrq_MASK 0x00000001
+#define wqe_ffrq_WORD word11
 #define wqe_wqec_SHIFT 7
 #define wqe_wqec_MASK 0x00000001
 #define wqe_wqec_WORD word11

@@ -12188,7 +12188,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
 	rc = pci_enable_msi(phba->pcidev);
 	if (!rc)
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"0462 PCI enable MSI mode success.\n");
+				"0012 PCI enable MSI mode success.\n");
 	else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);

@@ -834,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	lpfc_nvmet_invalidate_host(phba, ndlp);

 	if (ndlp->nlp_DID == Fabric_DID) {
-		if (vport->port_state <= LPFC_FDISC)
+		if (vport->port_state <= LPFC_FDISC ||
+		    vport->fc_flag & FC_PT2PT)
 			goto out;
 		lpfc_linkdown_port(vport);
 		spin_lock_irq(shost->host_lock);

@@ -1065,25 +1065,37 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
			nCmd->rcv_rsplen = wcqe->parameter;
			nCmd->status = 0;

+			/* Get the NVME cmd details for this unique error. */
+			cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
+			ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
+
			/* Check if this is really an ERSP */
			if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				lpfc_ncmd->result = 0;

				lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
-					 "6084 NVME Completion ERSP: "
-					 "xri %x placed x%x\n",
+					 "6084 NVME FCP_ERR ERSP: "
+					 "xri %x placed x%x opcode x%x cmd_id "
+					 "x%x cqe_status x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
-					 wcqe->total_data_placed);
+					 wcqe->total_data_placed,
+					 cp->sqe.common.opcode,
+					 cp->sqe.common.command_id,
+					 ep->cqe.status);
				break;
			}
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "6081 NVME Completion Protocol Error: "
					 "xri %x status x%x result x%x "
-					 "placed x%x\n",
+					 "placed x%x opcode x%x cmd_id x%x, "
+					 "cqe_status x%x\n",
					 lpfc_ncmd->cur_iocbq.sli4_xritag,
					 lpfc_ncmd->status, lpfc_ncmd->result,
-					 wcqe->total_data_placed);
+					 wcqe->total_data_placed,
+					 cp->sqe.common.opcode,
+					 cp->sqe.common.command_id,
+					 ep->cqe.status);
			break;
		case IOSTAT_LOCAL_REJECT:
			/* Let fall through to set command final state. */
@@ -1195,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
-	struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+	struct nvme_common_command *sqe;
+	struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq;
 	union lpfc_wqe128 *wqe = &pwqeq->wqe;
 	uint32_t req_len;

@@ -1252,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 		cstat->control_requests++;
 	}

-	if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
+	if (pnode->nlp_nvme_info & NLP_NVME_NSLER) {
 		bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
+		sqe = &((struct nvme_fc_cmd_iu *)
+			nCmd->cmdaddr)->sqe.common;
+		if (sqe->opcode == nvme_admin_async_event)
+			bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1);
+	}

 	/*
	 * Finish initializing those WQE fields that are independent
	 * of the nvme_cmnd request_buffer
@@ -1787,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
- * @abts_cmpl: Pointer to wcqe complete object.
+ * @rspiocb: Pointer to response iocb object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
@@ -1796,8 +1815,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 **/
 void
 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
-			   struct lpfc_wcqe_complete *abts_cmpl)
+			   struct lpfc_iocbq *rspiocb)
 {
+	struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl;
+
 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6145 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
@@ -1840,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 	struct lpfc_nvme_fcpreq_priv *freqpriv;
 	unsigned long flags;
 	int ret_val;
+	struct nvme_fc_cmd_iu *cp;

 	/* Validate pointers. LLDD fault handling with transport does
	 * have timing races.
@@ -1963,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
 		return;
 	}

+	/*
+	 * Get Command Id from cmd to plug into response. This
+	 * code is not needed in the next NVME Transport drop.
+	 */
+	cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
			 "6138 Transport Abort NVME Request Issued for "
-			 "ox_id x%x\n",
-			 nvmereq_wqe->sli4_xritag);
+			 "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
+			 nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
+			 cp->sqe.common.command_id);
 	return;

 out_unlock:

@@ -6062,6 +6062,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
 	int status;
 	u32 logit = LOG_FCP;

+	if (!rport)
+		return FAILED;
+
 	rdata = rport->dd_data;
 	if (!rdata || !rdata->pnode) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -6140,6 +6143,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 	unsigned long flags;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

+	if (!rport)
+		return FAILED;
+
 	rdata = rport->dd_data;
 	if (!rdata || !rdata->pnode) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,

@@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
 	sync_buf = __lpfc_sli_get_iocbq(phba);
 	if (!sync_buf) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
-				"6213 No available WQEs for CMF_SYNC_WQE\n");
+				"6244 No available WQEs for CMF_SYNC_WQE\n");
 		ret_val = ENOMEM;
 		goto out_unlock;
 	}
@@ -3805,7 +3805,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
					set_job_ulpword4(cmdiocbp,
							 IOERR_ABORT_REQUESTED);
					/*
-					 * For SLI4, irsiocb contains
+					 * For SLI4, irspiocb contains
					 * NO_XRI in sli_xritag, it
					 * shall not affect releasing
					 * sgl (xri) process.
@@ -3823,7 +3823,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
				}
			}
		}
-		(cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq);
+		cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
	} else
		lpfc_sli_release_iocbq(phba, cmdiocbp);
 } else {
@@ -4063,8 +4063,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
			cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->cmd_cmpl) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
-				(cmdiocbq->cmd_cmpl)(phba, cmdiocbq,
-						      &rspiocbq);
+				cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
@@ -10288,7 +10287,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to
- * send an iocb command to an HBA with SLI-4 interface spec.
+ * send an iocb command to an HBA with SLI-3 interface spec.
 *
 * This function takes the hbalock before invoking the lockless version.
 * The function will return success after it successfully submit the wqe to
@@ -12740,7 +12739,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
 	cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
 	cmdiocbq->wait_cmd_cmpl = NULL;
 	if (cmdiocbq->cmd_cmpl)
-		(cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL);
+		cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
 	else
 		lpfc_sli_release_iocbq(phba, cmdiocbq);
 	return;
@@ -13896,7 +13895,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and construct
- * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
+ * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
@@ -13940,7 +13939,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,

 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
-		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
+		irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
 	}

@@ -14799,7 +14798,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 		/* Pass the cmd_iocb and the wcqe to the upper layer */
 		memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
		       sizeof(struct lpfc_wcqe_complete));
-		(cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq);
+		cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
 	} else {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "

@@ -20,7 +20,7 @@
 * included with this package.                                     *
 *******************************************************************/

-#define LPFC_DRIVER_VERSION "14.2.0.3"
+#define LPFC_DRIVER_VERSION "14.2.0.4"
 #define LPFC_DRIVER_NAME "lpfc"

 /* Used for SLI 2/3 */

@@ -5369,6 +5369,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
 	Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
+	u16 depth;
 	int sz;
 	int rc = 0;

@@ -5380,7 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
 		goto out;
 	/* sas iounit page 1 */
 	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData);
-	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+	sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL);
 	if (!sas_iounit_pg1) {
 		pr_err("%s: failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
@@ -5393,16 +5394,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
		    ioc->name, __FILE__, __LINE__, __func__);
 		goto out;
 	}
-	ioc->max_wideport_qd =
-	    (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ?
-	    le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) :
-	    MPT3SAS_SAS_QUEUE_DEPTH;
-	ioc->max_narrowport_qd =
-	    (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ?
-	    le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) :
-	    MPT3SAS_SAS_QUEUE_DEPTH;
-	ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ?
-	    sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH;
+
+	depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth);
+	ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+	depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth);
+	ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
+
+	depth = sas_iounit_pg1->SATAMaxQDepth;
+	ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
+
 	/* pcie iounit page 1 */
 	rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
	    &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));

@@ -4031,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
 	return 0;

 out_unwind:
-	while (--i > 0)
+	while (--i >= 0)
 		free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
 	pci_free_irq_vectors(pdev);
 	return rc;

@@ -2826,6 +2826,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip,
 	}
 }

+static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
+				     struct sdeb_zone_state *zsp)
+{
+	switch (zsp->z_cond) {
+	case ZC2_IMPLICIT_OPEN:
+		devip->nr_imp_open--;
+		break;
+	case ZC3_EXPLICIT_OPEN:
+		devip->nr_exp_open--;
+		break;
+	default:
+		WARN_ONCE(true, "Invalid zone %llu condition %x\n",
+			  zsp->z_start, zsp->z_cond);
+		break;
+	}
+	zsp->z_cond = ZC5_FULL;
+}
+
 static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
 {
@@ -2838,7 +2856,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
 	if (zsp->z_type == ZBC_ZTYPE_SWR) {
 		zsp->z_wp += num;
 		if (zsp->z_wp >= zend)
-			zsp->z_cond = ZC5_FULL;
+			zbc_set_zone_full(devip, zsp);
 		return;
 	}

@@ -2857,7 +2875,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip,
			n = num;
 		}
 		if (zsp->z_wp >= zend)
-			zsp->z_cond = ZC5_FULL;
+			zbc_set_zone_full(devip, zsp);

 		num -= n;
 		lba += n;

@@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size)
 		return NULL;

 	mutex_lock(&iscsi_ep_idr_mutex);
-	id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+
+	/*
+	 * First endpoint id should be 1 to comply with user space
+	 * applications (iscsid).
+	 */
+	id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO);
 	if (id < 0) {
 		mutex_unlock(&iscsi_ep_idr_mutex);
 		printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",

@@ -3073,7 +3073,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp)
 		goto out;

 	/* We must have at least a 64B header and one 32B range descriptor */
-	vpd_len = get_unaligned_be16(&buffer[2]) + 3;
+	vpd_len = get_unaligned_be16(&buffer[2]) + 4;
 	if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
 		sd_printk(KERN_ERR, sdkp,
			  "Invalid Concurrent Positioning Ranges VPD page\n");

@@ -1844,7 +1844,7 @@ static struct scsi_host_template scsi_driver = {
 	.cmd_per_lun = 2048,
 	.this_id = -1,
 	/* Ensure there are no gaps in presented sgls */
-	.virt_boundary_mask = PAGE_SIZE-1,
+	.virt_boundary_mask = HV_HYP_PAGE_SIZE - 1,
 	.no_write_same = 1,
 	.track_queue_depth = 1,
 	.change_queue_depth = storvsc_change_queue_depth,
@@ -1895,6 +1895,7 @@ static int storvsc_probe(struct hv_device *device,
 	int target = 0;
 	struct storvsc_device *stor_device;
 	int max_sub_channels = 0;
+	u32 max_xfer_bytes;

 	/*
	 * We support sub-channels for storage on SCSI and FC controllers.
@@ -1968,12 +1969,28 @@ static int storvsc_probe(struct hv_device *device,
 	}
 	/* max cmd length */
 	host->max_cmd_len = STORVSC_MAX_CMD_LEN;

 	/*
-	 * set the table size based on the info we got
-	 * from the host.
+	 * Any reasonable Hyper-V configuration should provide
+	 * max_transfer_bytes value aligning to HV_HYP_PAGE_SIZE,
+	 * protecting it from any weird value.
 	 */
-	host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT);
+	max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE);
+	/* max_hw_sectors_kb */
+	host->max_sectors = max_xfer_bytes >> 9;
+	/*
+	 * There are 2 requirements for Hyper-V storvsc sgl segments,
+	 * based on which the below calculation for max segments is
+	 * done:
+	 *
+	 * 1. Except for the first and last sgl segment, all sgl segments
+	 *    should be align to HV_HYP_PAGE_SIZE, that also means the
+	 *    maximum number of segments in a sgl can be calculated by
+	 *    dividing the total max transfer length by HV_HYP_PAGE_SIZE.
+	 *
+	 * 2. Except for the first and last, each entry in the SGL must
+	 *    have an offset that is a multiple of HV_HYP_PAGE_SIZE.
+	 */
+	host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;
 	/*
	 * For non-IDE disks, the host supports multiple channels.
	 * Set the number of HW queues we are supporting.

@@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc {
 	u8 tag;
 	u8 bus;
 	u8 target;
-	u8 vcpuHint;
-	u8 unused[59];
+	u16 vcpuHint;
+	u8 unused[58];
 } __packed;

 /*

@@ -748,17 +748,28 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 }

 /**
- * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
+ * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
- * @pos: position of the bit to be cleared
+ * @mask: mask with one bit set for each request to be cleared
 */
-static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
+static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
 {
 	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
-		ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
-	else
-		ufshcd_writel(hba, ~(1 << pos),
-			      REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+		mask = ~mask;
+	/*
+	 * From the UFSHCI specification: "UTP Transfer Request List CLear
+	 * Register (UTRLCLR): This field is bit significant. Each bit
+	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
+	 * corresponds to request slot 0. A bit in this field is set to ‘0’
+	 * by host software to indicate to the host controller that a transfer
+	 * request slot is cleared. The host controller
+	 * shall free up any resources associated to the request slot
+	 * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
+	 * host software indicates no change to request slots by setting the
+	 * associated bits in this field to ‘1’. Bits in this field shall only
+	 * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
+	 */
+	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
 }

 /**
@@ -2866,27 +2877,26 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
 	return ufshcd_compose_devman_upiu(hba, lrbp);
 }

-static int
-ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+/*
+ * Clear all the requests from the controller for which a bit has been set in
+ * @mask and wait until the controller confirms that these requests have been
+ * cleared.
+ */
+static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask)
 {
-	int err = 0;
 	unsigned long flags;
-	u32 mask = 1 << tag;

 	/* clear outstanding transaction before retry */
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_utrl_clear(hba, tag);
+	ufshcd_utrl_clear(hba, mask);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);

 	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
-	err = ufshcd_wait_for_register(hba,
-			REG_UTP_TRANSFER_REQ_DOOR_BELL,
-			mask, ~mask, 1000, 1000);
-
-	return err;
+	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
+					mask, ~mask, 1000, 1000);
 }

 static int
@@ -2966,7 +2976,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 		err = -ETIMEDOUT;
 		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
			__func__, lrbp->task_tag);
-		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+		if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
		/*
@@ -6967,14 +6977,14 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 }

 /**
- * ufshcd_eh_device_reset_handler - device reset handler registered to
- * scsi layer.
+ * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
+	unsigned long flags, pending_reqs = 0, not_cleared = 0;
 	struct Scsi_Host *host;
 	struct ufs_hba *hba;
 	u32 pos;
@@ -6993,14 +7003,24 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 	}

 	/* clear the commands that were pending for corresponding LUN */
-	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
-		if (hba->lrb[pos].lun == lun) {
-			err = ufshcd_clear_cmd(hba, pos);
-			if (err)
-				break;
-			__ufshcd_transfer_req_compl(hba, 1U << pos);
-		}
+	spin_lock_irqsave(&hba->outstanding_lock, flags);
+	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
+		if (hba->lrb[pos].lun == lun)
+			__set_bit(pos, &pending_reqs);
+	hba->outstanding_reqs &= ~pending_reqs;
+	spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+	if (ufshcd_clear_cmds(hba, pending_reqs) < 0) {
+		spin_lock_irqsave(&hba->outstanding_lock, flags);
+		not_cleared = pending_reqs &
+			ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+		hba->outstanding_reqs |= not_cleared;
+		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+		dev_err(hba->dev, "%s: failed to clear requests %#lx\n",
+			__func__, not_cleared);
 	}
+	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared);

 out:
 	hba->req_abort_count = 0;
@@ -7097,7 +7117,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
 		goto out;
 	}

-	err = ufshcd_clear_cmd(hba, tag);
+	err = ufshcd_clear_cmds(hba, 1U << tag);
 	if (err)
 		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);