/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
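/*
 * Sketch of the sizing math above (assuming 4 KB pages): 64 scatter/gather
 * segments x 4 KB per segment covers a 256 KB I/O, and the extra "+ 1"
 * segment leaves room for a first page that is not page-aligned
 * (assumption):
 *
 *	64 * 4096 = 262144 bytes = 256 KB
 */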
|
2017-02-13 00:52:30 +03:00
|
|
|
|
|
|
|
#define LPFC_NVME_ERSP_LEN 0x20
|
|
|
|
|
2017-11-21 03:00:40 +03:00
|
|
|
#define LPFC_NVME_WAIT_TMO 10
|
2017-12-09 04:18:03 +03:00
|
|
|
#define LPFC_NVME_EXPEDITE_XRICNT 8
|
2018-03-05 23:04:02 +03:00
|
|
|
#define LPFC_NVME_FB_SHIFT 9
|
|
|
|
#define LPFC_NVME_MAX_FB (1 << 20) /* 1M */
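/*
 * Illustrative note (assumption): first-burst sizes are negotiated in units
 * of 512 bytes, so a unit count converts to bytes with the shift above and
 * is capped at LPFC_NVME_MAX_FB, e.g.:
 *
 *	fb_bytes = min(fb_units << LPFC_NVME_FB_SHIFT, LPFC_NVME_MAX_FB);
 */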
#define lpfc_ndlp_get_nrport(ndlp)					\
	((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\
	? NULL : ndlp->nrport)
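/*
 * Usage sketch (hypothetical, for illustration): the macro yields NULL
 * while an unregister is pending, so callers should sample it under a lock
 * that serializes nrport teardown (assumption) and test the result, e.g.:
 *
 *	spin_lock_irq(&ndlp->lock);
 *	nrport = lpfc_ndlp_get_nrport(ndlp);
 *	if (nrport)
 *		remoteport = nrport->remoteport;
 *	spin_unlock_irq(&ndlp->lock);
 */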
struct lpfc_nvme_qhandle {
	uint32_t index;		/* WQ index to use */
	uint32_t qidx;		/* queue index passed to create */
	uint32_t cpu_id;	/* current cpu id at time of create */
};
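/*
 * Informal note: qidx and cpu_id are recorded when the transport creates
 * the queue, while index names the WQ actually used for submissions, which
 * (assumption) may later be remapped so I/O issues on the hardware queue
 * matching the running cpu.
 */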
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
	struct lpfc_vport *vport;
	struct completion *lport_unreg_cmp;
	/* Add stats counters here */
	atomic_t fc4NvmeLsRequests;
	atomic_t fc4NvmeLsCmpls;
	atomic_t xmt_fcp_noxri;
	atomic_t xmt_fcp_bad_ndlp;
	atomic_t xmt_fcp_qdepth;
	atomic_t xmt_fcp_wqerr;
	atomic_t xmt_fcp_err;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_err;
	atomic_t cmpl_fcp_xb;
	atomic_t cmpl_fcp_err;
	atomic_t cmpl_ls_xb;
	atomic_t cmpl_ls_err;
};
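/*
 * Sketch (not driver code): the counters above are plain atomics so the hot
 * path can bump them without locking and a diagnostic path can snapshot
 * them, e.g.:
 *
 *	atomic_inc(&lport->xmt_fcp_err);          on a failed send
 *	err = atomic_read(&lport->xmt_fcp_err);   when dumping stats
 */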
struct lpfc_nvme_rport {
	struct lpfc_nvme_lport *lport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nodelist *ndlp;
	struct completion rport_unreg_done;
};

struct lpfc_nvme_fcpreq_priv {
	struct lpfc_io_buf *nvme_buf;
};
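/*
 * Sketch (assumption): the nvme-fc transport allocates this structure with
 * each FCP request when the LLDD template sets fcprqst_priv_sz to its size,
 * so the driver can recover it from the request's private pointer:
 *
 *	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
 */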
/*
 * Set NVME LS request timeouts to 30s. This is larger than the 2*R_A_TOV
 * mandated by the spec, which some devices appear to have issues meeting.
 */
#define LPFC_NVME_LS_TIMEOUT		30
#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
#define LPFC_NVMET_RQE_MIN_POST		128
#define LPFC_NVMET_RQE_DEF_POST		512
#define LPFC_NVMET_RQE_DEF_COUNT	2048
#define LPFC_NVMET_SUCCESS_LEN		12

#define LPFC_NVMET_MRQ_AUTO		0
#define LPFC_NVMET_MRQ_MAX		16

#define LPFC_NVMET_WAIT_TMO		(5 * MSEC_PER_SEC)
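/*
 * Informal reading of the receive-queue knobs above (assumption): the RQ is
 * sized for LPFC_NVMET_RQE_DEF_COUNT entries, consumed buffers are
 * replenished in batches of LPFC_NVMET_RQE_DEF_POST, and reposting fewer
 * than LPFC_NVMET_RQE_MIN_POST entries at a time is not worthwhile.
 */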
/* Used for NVME Target */
#define LPFC_NVMET_INV_HOST_ACTIVE	1

struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;
	struct completion *tport_unreg_cmp;
	atomic_t state;		/* tracks nvmet hosthandle invalidation */

	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
	atomic_t rcv_ls_req_in;
	atomic_t rcv_ls_req_out;
	atomic_t rcv_ls_req_drop;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_abort_cmpl;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
	atomic_t xmt_ls_rsp;
	atomic_t xmt_ls_drop;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_aborted;
	atomic_t xmt_ls_rsp_xb_set;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
	atomic_t rcv_fcp_cmd_in;
	atomic_t rcv_fcp_cmd_out;
	atomic_t rcv_fcp_cmd_drop;
	atomic_t rcv_fcp_cmd_defer;
	atomic_t xmt_fcp_release;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
	atomic_t xmt_fcp_drop;
	atomic_t xmt_fcp_read_rsp;
	atomic_t xmt_fcp_read;
	atomic_t xmt_fcp_write;
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_xb_set;
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_aborted;
	atomic_t xmt_fcp_rsp_drop;

	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
	atomic_t xmt_fcp_xri_abort_cqe;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_abort_cmpl;
	atomic_t xmt_abort_sol;
	atomic_t xmt_abort_unsol;
	atomic_t xmt_abort_rsp;
	atomic_t xmt_abort_rsp_error;

	/* Stats counters - defer IO */
	atomic_t defer_ctx;
	atomic_t defer_fod;
	atomic_t defer_wqfull;
};

struct lpfc_nvmet_ctx_info {
	struct list_head nvmet_ctx_list;
	spinlock_t nvmet_ctx_list_lock;	/* lock per CPU */
	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
	uint16_t nvmet_ctx_list_cnt;
	char pad[16];	/* pad to a cache-line */
};

/* This retrieves the context info associated with the specified cpu / mrq */
#define lpfc_get_ctx_list(phba, cpu, mrq)  \
	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
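/*
 * The context info array is laid out cpu-major: all MRQ slots for cpu 0,
 * then all MRQ slots for cpu 1, and so on, which follows directly from the
 * arithmetic above. A usage sketch (hypothetical):
 *
 *	struct lpfc_nvmet_ctx_info *infop;
 *
 *	infop = lpfc_get_ctx_list(phba, smp_processor_id(), 0);
 *	spin_lock(&infop->nvmet_ctx_list_lock);
 *	cnt = infop->nvmet_ctx_list_cnt;
 *	spin_unlock(&infop->nvmet_ctx_list_lock);
 */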
/* Values for state field of struct lpfc_async_xchg_ctx */
#define LPFC_NVME_STE_LS_RCV		1
#define LPFC_NVME_STE_LS_ABORT		2
#define LPFC_NVME_STE_LS_RSP		3
#define LPFC_NVME_STE_RCV		4
#define LPFC_NVME_STE_DATA		5
#define LPFC_NVME_STE_ABORT		6
#define LPFC_NVME_STE_DONE		7
#define LPFC_NVME_STE_FREE		0xff
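/*
 * Rough lifecycle (assumption): a received FCP exchange moves
 * STE_RCV -> STE_DATA -> STE_DONE, returning to STE_FREE when its context
 * is recycled; an LS exchange moves STE_LS_RCV -> STE_LS_RSP -> STE_DONE,
 * with the ABORT states entered if the exchange is terminated early.
 */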
/* Values for flag field of struct lpfc_async_xchg_ctx */
#define LPFC_NVME_IO_INP	0x1	/* IO is in progress on exchange */
#define LPFC_NVME_ABORT_OP	0x2	/* Abort WQE issued on exchange */
#define LPFC_NVME_XBUSY		0x4	/* XB bit set on IO cmpl */
#define LPFC_NVME_CTX_RLS	0x8	/* ctx free requested */
#define LPFC_NVME_ABTS_RCV	0x10	/* ABTS received on exchange */
#define LPFC_NVME_CTX_REUSE_WQ	0x20	/* ctx reused via WQ */
#define LPFC_NVME_DEFER_WQFULL	0x40	/* Waiting on a free WQE */
#define LPFC_NVME_TNOTIFY	0x80	/* notify transport of abts */
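/*
 * Sketch, mirroring the ctxlock comment in the structure below: flag is a
 * bit mask updated under ctxlock, e.g. when issuing an abort WQE:
 *
 *	spin_lock_irqsave(&ctxp->ctxlock, iflag);
 *	ctxp->flag |= LPFC_NVME_ABORT_OP;
 *	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 */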
struct lpfc_async_xchg_ctx {
	union {
		struct nvmefc_tgt_fcp_req fcp_req;
	} hdlrctx;
	struct list_head list;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct nvmefc_ls_req *ls_req;
	struct nvmefc_ls_rsp ls_rsp;
	struct lpfc_iocbq *wqeq;
	struct lpfc_iocbq *abort_wqeq;
	spinlock_t ctxlock;	/* protect flag access */
	uint32_t sid;
	uint32_t offset;
	uint16_t oxid;
	uint16_t size;
	uint16_t entry_cnt;
	uint16_t cpu;
	uint16_t idx;
	uint16_t state;
	uint16_t flag;
	void *payload;
	struct rqb_dmabuf *rqb_buffer;
	struct lpfc_nvmet_ctxbuf *ctxbuf;
	struct lpfc_sli4_hdw_queue *hdwq;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t ts_isr_cmd;
	uint64_t ts_cmd_nvme;
	uint64_t ts_nvme_data;
	uint64_t ts_data_wqput;
	uint64_t ts_isr_data;
	uint64_t ts_data_nvme;
	uint64_t ts_nvme_status;
	uint64_t ts_status_wqput;
	uint64_t ts_isr_status;
	uint64_t ts_status_nvme;
#endif
};
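/*
 * Informal note on the ts_* fields (assumption): each records a timestamp
 * as a target I/O passes a stage (command ISR, handoff to the nvmet
 * transport, data and status WQE puts and their completions), so per-stage
 * latency falls out by subtraction, e.g. ts_cmd_nvme - ts_isr_cmd is the
 * ISR-to-transport time.
 */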
/* routines found in lpfc_nvme.c */
int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct nvmefc_ls_req *pnvme_lsreq,
		void (*gen_req_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe));
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);
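/*
 * Hypothetical call sketch: taking gen_req_cmp as a parameter lets both the
 * initiator and target sides reuse the same LS engine with their own
 * completion handlers (assumption), e.g.:
 *
 *	ret = __lpfc_nvme_ls_req(vport, ndlp, pnvme_lsreq,
 *				 my_ls_req_cmp);
 */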
/* routines found in lpfc_nvmet.c */
int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
			uint16_t xri);
int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
			struct nvmefc_ls_rsp *ls_rsp,
			void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe));
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);