Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger:
 "There has been work in a number of different areas over the last weeks,
  including:

   - Fix target-core-user (TCMU) back-end bi-directional handling (Xiubo
     Li + Mike Christie + Ilias Tsitsimpis)

   - Fix iscsi-target TMR reference leak during session shutdown (Rob
     Millner + Chu Yuan Lin)

   - Fix target_core_fabric_configfs.c race between LUN shutdown + mapped
     LUN creation (James Shen)

   - Fix target-core unknown fabric callback queue-full errors (Potnuri
     Bharat Teja)

   - Fix iscsi-target + iser-target queue-full handling in order to
     support iw_cxgb4 RNICs (Potnuri Bharat Teja + Sagi Grimberg)

   - Fix ALUA transition state race between multiple initiators (Mike
     Christie)

   - Drop work-around for legacy GlobalSAN initiator, to allow QLogic
     57840S + 579xx offload HBAs to work out-of-the-box in MSFT
     environments (Martin Svec + Arun Easi)

  Note that a number are CC'ed for stable, and although the queue-full
  bug-fixes required for iser-target to work with iw_cxgb4 aren't CC'ed
  here, they'll be posted to Greg-KH separately"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  tcmu: Skip Data-Out blocks before gathering Data-In buffer for BIDI case
  iscsi-target: Drop work-around for legacy GlobalSAN initiator
  target: Fix ALUA transition state race between multiple initiators
  iser-target: avoid posting a recv buffer twice
  iser-target: Fix queue-full response handling
  iscsi-target: Propigate queue_data_in + queue_status errors
  target: Fix unknown fabric callback queue-full errors
  tcmu: Fix wrongly calculating of the base_command_size
  tcmu: Fix possible overwrite of t_data_sg's last iov[]
  target: Avoid mappedlun symlink creation during lun shutdown
  iscsi-target: Fix TMR reference leak during session shutdown
  usb: gadget: Correct usb EP argument for BOT status request
  tcmu: Allow cmd_time_out to be set to zero (disabled)
commit 025def92dd
drivers/infiniband/ulp/isert/ib_isert.c

@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
         rx_wr->sg_list = &rx_desc->rx_sg;
         rx_wr->num_sge = 1;
         rx_wr->next = rx_wr + 1;
+        rx_desc->in_use = false;
     }
     rx_wr--;
     rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
     struct ib_recv_wr *rx_wr_failed, rx_wr;
     int ret;
 
+    if (!rx_desc->in_use) {
+        /*
+         * if the descriptor is not in-use we already reposted it
+         * for recv, so just silently return
+         */
+        return 0;
+    }
+
+    rx_desc->in_use = false;
     rx_wr.wr_cqe = &rx_desc->rx_cqe;
     rx_wr.sg_list = &rx_desc->rx_sg;
     rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
         return;
     }
 
+    rx_desc->in_use = true;
+
     ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
             ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
     ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
     isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
-    if (ret)
-        transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
-    else
-        isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+    if (ret) {
+        /*
+         * transport_generic_request_failure() expects to have
+         * plus two references to handle queue-full, so re-add
+         * one here as target-core will have already dropped
+         * it after the first isert_put_datain() callback.
+         */
+        kref_get(&cmd->cmd_kref);
+        transport_generic_request_failure(cmd, cmd->pi_err);
+    } else {
+        /*
+         * XXX: isert_put_response() failure is not retried.
+         */
+        ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+        if (ret)
+            pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+    }
 }
 
 static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
     cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
     spin_unlock_bh(&cmd->istate_lock);
 
-    if (ret) {
-        target_put_sess_cmd(se_cmd);
-        transport_send_check_condition_and_sense(se_cmd,
-                         se_cmd->pi_err, 0);
-    } else {
+    /*
+     * transport_generic_request_failure() will drop the extra
+     * se_cmd->cmd_kref reference after T10-PI error, and handle
+     * any non-zero ->queue_status() callback error retries.
+     */
+    if (ret)
+        transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+    else
         target_execute_cmd(se_cmd);
-    }
 }
 
 static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
         chain_wr = &isert_cmd->tx_desc.send_wr;
     }
 
-    isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
-    isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
-    return 1;
+    rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+    isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+          isert_cmd, rc);
+    return rc;
 }
 
 static int
 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
     struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+    int ret;
 
     isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
          isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
 
     isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-    isert_rdma_rw_ctx_post(isert_cmd, conn->context,
-                   &isert_cmd->tx_desc.tx_cqe, NULL);
+    ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+                     &isert_cmd->tx_desc.tx_cqe, NULL);
 
-    isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
-         isert_cmd);
-    return 0;
+    isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+          isert_cmd, ret);
+    return ret;
 }
 
 static int
drivers/infiniband/ulp/isert/ib_isert.h

@@ -60,7 +60,7 @@
 
 #define ISER_RX_PAD_SIZE    (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
         (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-         sizeof(struct ib_cqe)))
+         sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE        256
 
@@ -85,6 +85,7 @@ struct iser_rx_desc {
     u64        dma_addr;
     struct ib_sge    rx_sg;
     struct ib_cqe    rx_cqe;
+    bool        in_use;
     char        pad[ISER_RX_PAD_SIZE];
 } __packed;
 
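To see the shape of the isert change in isolation: a receive descriptor is now marked in_use when a completion arrives on it and cleared again when it is (re)posted, so a second post attempt on a descriptor that was already reposted becomes a no-op. The following stand-alone C sketch is only a toy user-space model of that guard; the struct, helper and return values are illustrative and are not the driver's API, only the in_use protocol mirrors the hunks above.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the rx-descriptor double-post guard added above. */
struct rx_desc {
    bool in_use;
};

static int post_recv(struct rx_desc *d)
{
    if (!d->in_use)    /* already reposted, silently return */
        return 0;
    d->in_use = false;
    /* ... the real driver would call ib_post_recv() here ... */
    return 1;        /* 1 == actually posted, for the demo only */
}

int main(void)
{
    struct rx_desc d = { .in_use = true };    /* a completion was seen */
    printf("%d\n", post_recv(&d));    /* 1: posted */
    printf("%d\n", post_recv(&d));    /* 0: skipped, already reposted */
    return 0;
}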
drivers/target/iscsi/iscsi_target.c

@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
 
 int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-    iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-    return 0;
+    return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 EXPORT_SYMBOL(iscsit_queue_rsp);
 
drivers/target/iscsi/iscsi_target_configfs.c

@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
 static int lio_queue_data_in(struct se_cmd *se_cmd)
 {
     struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+    struct iscsi_conn *conn = cmd->conn;
 
     cmd->i_state = ISTATE_SEND_DATAIN;
-    cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
-
-    return 0;
+    return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
 }
 
 static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
 static int lio_queue_status(struct se_cmd *se_cmd)
 {
     struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+    struct iscsi_conn *conn = cmd->conn;
 
     cmd->i_state = ISTATE_SEND_STATUS;
 
     if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
-        iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-        return 0;
+        return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
     }
-    cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
-
-    return 0;
+    return conn->conn_transport->iscsit_queue_status(conn, cmd);
 }
 
 static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
drivers/target/iscsi/iscsi_target_parameters.c

@@ -781,22 +781,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
     } else if (IS_TYPE_NUMBER(param)) {
         if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
             SET_PSTATE_REPLY_OPTIONAL(param);
-        /*
-         * The GlobalSAN iSCSI Initiator for MacOSX does
-         * not respond to MaxBurstLength, FirstBurstLength,
-         * DefaultTime2Wait or DefaultTime2Retain parameter keys.
-         * So, we set them to 'reply optional' here, and assume the
-         * the defaults from iscsi_parameters.h if the initiator
-         * is not RFC compliant and the keys are not negotiated.
-         */
-        if (!strcmp(param->name, MAXBURSTLENGTH))
-            SET_PSTATE_REPLY_OPTIONAL(param);
-        if (!strcmp(param->name, FIRSTBURSTLENGTH))
-            SET_PSTATE_REPLY_OPTIONAL(param);
-        if (!strcmp(param->name, DEFAULTTIME2WAIT))
-            SET_PSTATE_REPLY_OPTIONAL(param);
-        if (!strcmp(param->name, DEFAULTTIME2RETAIN))
-            SET_PSTATE_REPLY_OPTIONAL(param);
         /*
          * Required for gPXE iSCSI boot client
          */
drivers/target/iscsi/iscsi_target_util.c

@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
     }
 }
 
-void iscsit_add_cmd_to_response_queue(
+int iscsit_add_cmd_to_response_queue(
     struct iscsi_cmd *cmd,
     struct iscsi_conn *conn,
     u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
     if (!qr) {
         pr_err("Unable to allocate memory for"
             " struct iscsi_queue_req\n");
-        return;
+        return -ENOMEM;
     }
     INIT_LIST_HEAD(&qr->qr_list);
     qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
     spin_unlock_bh(&conn->response_queue_lock);
 
     wake_up(&conn->queues_wq);
+    return 0;
 }
 
 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
     struct se_cmd *se_cmd = NULL;
     int rc;
+    bool op_scsi = false;
     /*
      * Determine if a struct se_cmd is associated with
      * this struct iscsi_cmd.
      */
     switch (cmd->iscsi_opcode) {
     case ISCSI_OP_SCSI_CMD:
-        se_cmd = &cmd->se_cmd;
-        __iscsit_free_cmd(cmd, true, shutdown);
+        op_scsi = true;
         /*
          * Fallthrough
          */
     case ISCSI_OP_SCSI_TMFUNC:
-        rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
-        if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
-            __iscsit_free_cmd(cmd, true, shutdown);
+        se_cmd = &cmd->se_cmd;
+        __iscsit_free_cmd(cmd, op_scsi, shutdown);
+        rc = transport_generic_free_cmd(se_cmd, shutdown);
+        if (!rc && shutdown && se_cmd->se_sess) {
+            __iscsit_free_cmd(cmd, op_scsi, shutdown);
             target_put_sess_cmd(se_cmd);
         }
         break;
drivers/target/iscsi/iscsi_target_util.h

@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
     struct iscsi_conn_recovery **, itt_t);
 extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
 extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
drivers/target/target_core_alua.c

@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
         /*
          * Set the ASYMMETRIC ACCESS State
          */
-        buf[off++] |= (atomic_read(
-            &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+        buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
         /*
          * Set supported ASYMMETRIC ACCESS State bits
          */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
     spin_lock(&lun->lun_tg_pt_gp_lock);
     tg_pt_gp = lun->lun_tg_pt_gp;
-    out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+    out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
     nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 
     // XXX: keeps using tg_pt_gp witout reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
 }
 
 /*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
  */
 static int core_alua_update_tpg_primary_metadata(
     struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
         "alua_access_state=0x%02x\n"
         "alua_access_status=0x%02x\n",
         tg_pt_gp->tg_pt_gp_id,
-        tg_pt_gp->tg_pt_gp_alua_pending_state,
+        tg_pt_gp->tg_pt_gp_alua_access_state,
         tg_pt_gp->tg_pt_gp_alua_access_status);
 
     snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,13 +1012,52 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
     spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 }
 
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
+static int core_alua_do_transition_tg_pt(
+    struct t10_alua_tg_pt_gp *tg_pt_gp,
+    int new_state,
+    int explicit)
 {
-    struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-        struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
-    struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-    bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
-             ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
+    int prev_state;
 
+    mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+    /* Nothing to be done here */
+    if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+        mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+        return 0;
+    }
+
+    if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+        mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+        return -EAGAIN;
+    }
+
+    /*
+     * Save the old primary ALUA access state, and set the current state
+     * to ALUA_ACCESS_STATE_TRANSITION.
+     */
+    prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+    tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
+    tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+    core_alua_queue_state_change_ua(tg_pt_gp);
+
+    if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+        mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
+        return 0;
+    }
+
+    /*
+     * Check for the optional ALUA primary state transition delay
+     */
+    if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+        msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+    /*
+     * Set the current primary ALUA access state to the requested new state
+     */
+    tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
+
     /*
      * Update the ALUA metadata buf that has been allocated in
@@ -1034,93 +1072,19 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
      * struct file does NOT affect the actual ALUA transition.
      */
     if (tg_pt_gp->tg_pt_gp_write_metadata) {
-        mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
         core_alua_update_tpg_primary_metadata(tg_pt_gp);
-        mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
     }
-    /*
-     * Set the current primary ALUA access state to the requested new state
-     */
-    atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-           tg_pt_gp->tg_pt_gp_alua_pending_state);
 
     pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
         " from primary access state %s to %s\n", (explicit) ? "explicit" :
         "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
         tg_pt_gp->tg_pt_gp_id,
-        core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
-        core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+        core_alua_dump_state(prev_state),
+        core_alua_dump_state(new_state));
 
     core_alua_queue_state_change_ua(tg_pt_gp);
 
-    spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-    atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-    spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-
-    if (tg_pt_gp->tg_pt_gp_transition_complete)
-        complete(tg_pt_gp->tg_pt_gp_transition_complete);
-}
-
-static int core_alua_do_transition_tg_pt(
-    struct t10_alua_tg_pt_gp *tg_pt_gp,
-    int new_state,
-    int explicit)
-{
-    struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-    DECLARE_COMPLETION_ONSTACK(wait);
-
-    /* Nothing to be done here */
-    if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
-        return 0;
-
-    if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
-        return -EAGAIN;
-
-    /*
-     * Flush any pending transitions
-     */
-    if (!explicit)
-        flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
-
-    /*
-     * Save the old primary ALUA access state, and set the current state
-     * to ALUA_ACCESS_STATE_TRANSITION.
-     */
-    atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-            ALUA_ACCESS_STATE_TRANSITION);
-    tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
-                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
-                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
-
-    core_alua_queue_state_change_ua(tg_pt_gp);
-
-    if (new_state == ALUA_ACCESS_STATE_TRANSITION)
-        return 0;
-
-    tg_pt_gp->tg_pt_gp_alua_previous_state =
-        atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-    tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-
-    /*
-     * Check for the optional ALUA primary state transition delay
-     */
-    if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
-        msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
-
-    /*
-     * Take a reference for workqueue item
-     */
-    spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-    atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-    spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-
-    schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
-    if (explicit) {
-        tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-        wait_for_completion(&wait);
-        tg_pt_gp->tg_pt_gp_transition_complete = NULL;
-    }
-
+    mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
     return 0;
 }
 
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
     }
     INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
     INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
-    mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+    mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
     spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
     atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-    INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-          core_alua_do_transition_tg_pt_work);
     tg_pt_gp->tg_pt_gp_dev = dev;
-    atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-        ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
+    tg_pt_gp->tg_pt_gp_alua_access_state =
+        ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
     /*
      * Enable both explicit and implicit ALUA support by default
      */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
     dev->t10_alua.alua_tg_pt_gps_counter--;
     spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-    flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
-
     /*
      * Allow a struct t10_alua_tg_pt_gp_member * referenced by
      * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
         "Primary Access Status: %s\nTG Port Secondary Access"
         " State: %s\nTG Port Secondary Access Status: %s\n",
         config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
-        core_alua_dump_state(atomic_read(
-                    &tg_pt_gp->tg_pt_gp_alua_access_state)),
+        core_alua_dump_state(
+            tg_pt_gp->tg_pt_gp_alua_access_state),
         core_alua_dump_status(
             tg_pt_gp->tg_pt_gp_alua_access_status),
         atomic_read(&lun->lun_tg_pt_secondary_offline) ?
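The ALUA hunks above drop the workqueue item and on-stack completion and instead run the whole state transition under a per-group mutex, so two initiators issuing STPG concurrently can no longer interleave the TRANSITION/new-state updates. The following is only a toy user-space model of that serialization, built on pthreads; the struct layout, constants and helper names are illustrative, not the kernel API.

#include <pthread.h>
#include <stdio.h>

#define ALUA_ACCESS_STATE_TRANSITION 2    /* illustrative value */

struct tg_pt_gp {
    pthread_mutex_t transition_mutex;
    int alua_access_state;
};

/* Model of the mutex-serialized transition introduced above. */
static int do_transition(struct tg_pt_gp *gp, int new_state)
{
    pthread_mutex_lock(&gp->transition_mutex);
    if (gp->alua_access_state == new_state) {    /* nothing to be done */
        pthread_mutex_unlock(&gp->transition_mutex);
        return 0;
    }
    gp->alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
    /* ... optional delay, metadata update and UA queueing go here ... */
    gp->alua_access_state = new_state;
    pthread_mutex_unlock(&gp->transition_mutex);
    return 0;
}

int main(void)
{
    struct tg_pt_gp gp = { PTHREAD_MUTEX_INITIALIZER, 0 };

    do_transition(&gp, 1);
    printf("state=%d\n", gp.alua_access_state);    /* state=1 */
    return 0;
}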
drivers/target/target_core_configfs.c

@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
         char *page)
 {
     return sprintf(page, "%d\n",
-        atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
+        to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
 }
 
 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
drivers/target/target_core_fabric_configfs.c

@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
         pr_err("Source se_lun->lun_se_dev does not exist\n");
         return -EINVAL;
     }
+    if (lun->lun_shutdown) {
+        pr_err("Unable to create mappedlun symlink because"
+            " lun->lun_shutdown=true\n");
+        return -EINVAL;
+    }
     se_tpg = lun->lun_tpg;
 
     nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
drivers/target/target_core_tpg.c

@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
      */
     struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
+    lun->lun_shutdown = true;
+
     core_clear_lun_from_tpg(lun, tpg);
     /*
      * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@
     }
     if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
         hlist_del_rcu(&lun->link);
+
+    lun->lun_shutdown = false;
     mutex_unlock(&tpg->tpg_lun_mutex);
 
     percpu_ref_exit(&lun->lun_ref);
drivers/target/target_core_transport.c

@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-        struct se_device *dev);
+        struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
         if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
             transport_write_pending_qf(cmd);
-        else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+        else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+             cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
             transport_complete_qf(cmd);
     }
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
         }
         trace_target_cmd_complete(cmd);
         ret = cmd->se_tfo->queue_status(cmd);
-        if (ret == -EAGAIN || ret == -ENOMEM)
+        if (ret)
             goto queue_full;
         goto check_stop;
     default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
     }
 
     ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-    if (ret == -EAGAIN || ret == -ENOMEM)
+    if (ret)
         goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
     return;
 
 queue_full:
-    cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-    transport_handle_queue_full(cmd, cmd->se_dev);
+    transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
     int ret = 0;
 
     transport_complete_task_attr(cmd);
+    /*
+     * If a fabric driver ->write_pending() or ->queue_data_in() callback
+     * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
+     * the same callbacks should not be retried.  Return CHECK_CONDITION
+     * if a scsi_status is not already set.
+     *
+     * If a fabric driver ->queue_status() has returned non zero, always
+     * keep retrying no matter what..
+     */
+    if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+        if (cmd->scsi_status)
+            goto queue_status;
 
-    if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-        trace_target_cmd_complete(cmd);
-        ret = cmd->se_tfo->queue_status(cmd);
-        goto out;
+        cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+        cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+        cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+        translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+        goto queue_status;
     }
 
+    if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+        goto queue_status;
+
     switch (cmd->data_direction) {
     case DMA_FROM_DEVICE:
         if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
         break;
     }
 
-out:
     if (ret < 0) {
-        transport_handle_queue_full(cmd, cmd->se_dev);
+        transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
         return;
     }
     transport_lun_remove_cmd(cmd);
     transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-    struct se_cmd *cmd,
-    struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+                    int err, bool write_pending)
 {
+    /*
+     * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+     * ->queue_data_in() callbacks from new process context.
+     *
+     * Otherwise for other errors, transport_complete_qf() will send
+     * CHECK_CONDITION via ->queue_status() instead of attempting to
+     * retry associated fabric driver data-transfer callbacks.
+     */
+    if (err == -EAGAIN || err == -ENOMEM) {
+        cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+                         TRANSPORT_COMPLETE_QF_OK;
+    } else {
+        pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+        cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+    }
+
     spin_lock_irq(&dev->qf_cmd_lock);
     list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
     atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
             WARN_ON(!cmd->scsi_status);
             ret = transport_send_check_condition_and_sense(
                     cmd, 0, 1);
-            if (ret == -EAGAIN || ret == -ENOMEM)
+            if (ret)
                 goto queue_full;
 
             transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
         } else if (rc) {
             ret = transport_send_check_condition_and_sense(cmd,
                         rc, 0);
-            if (ret == -EAGAIN || ret == -ENOMEM)
+            if (ret)
                 goto queue_full;
 
             transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
         if (target_read_prot_action(cmd)) {
             ret = transport_send_check_condition_and_sense(cmd,
                         cmd->pi_err, 0);
-            if (ret == -EAGAIN || ret == -ENOMEM)
+            if (ret)
                 goto queue_full;
 
             transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
 
         trace_target_cmd_complete(cmd);
         ret = cmd->se_tfo->queue_data_in(cmd);
-        if (ret == -EAGAIN || ret == -ENOMEM)
+        if (ret)
             goto queue_full;
         break;
     case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
             atomic_long_add(cmd->data_length,
                     &cmd->se_lun->lun_stats.tx_data_octets);
             ret = cmd->se_tfo->queue_data_in(cmd);
-            if (ret == -EAGAIN || ret == -ENOMEM)
+            if (ret)
                 goto queue_full;
             break;
         }
@@ -2166,7 +2197,7 @@ queue_rsp:
 queue_status:
         trace_target_cmd_complete(cmd);
         ret = cmd->se_tfo->queue_status(cmd);
-        if (ret == -EAGAIN || ret == -ENOMEM)
+        if (ret)
             goto queue_full;
         break;
     default:
@@ -2180,8 +2211,8 @@ queue_status:
 queue_full:
     pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
         " data_direction: %d\n", cmd, cmd->data_direction);
-    cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-    transport_handle_queue_full(cmd, cmd->se_dev);
+
+    transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
     ret = cmd->se_tfo->write_pending(cmd);
-    if (ret == -EAGAIN || ret == -ENOMEM)
+    if (ret)
         goto queue_full;
 
-    /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-    WARN_ON(ret);
-
-    return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+    return 0;
 
 queue_full:
     pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-    cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-    transport_handle_queue_full(cmd, cmd->se_dev);
+    transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
     return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
     int ret;
 
     ret = cmd->se_tfo->write_pending(cmd);
-    if (ret == -EAGAIN || ret == -ENOMEM) {
+    if (ret) {
         pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
             cmd);
-        transport_handle_queue_full(cmd, cmd->se_dev);
+        transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
     }
 }
 
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
     __releases(&cmd->t_state_lock)
     __acquires(&cmd->t_state_lock)
 {
+    int ret;
+
     assert_spin_locked(&cmd->t_state_lock);
     WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
         trace_target_cmd_complete(cmd);
 
         spin_unlock_irq(&cmd->t_state_lock);
-        cmd->se_tfo->queue_status(cmd);
+        ret = cmd->se_tfo->queue_status(cmd);
+        if (ret)
+            transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
         spin_lock_irq(&cmd->t_state_lock);
 
         return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
     unsigned long flags;
+    int ret;
 
     spin_lock_irqsave(&cmd->t_state_lock, flags);
     if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
          cmd->t_task_cdb[0], cmd->tag);
 
     trace_target_cmd_complete(cmd);
-    cmd->se_tfo->queue_status(cmd);
+    ret = cmd->se_tfo->queue_status(cmd);
+    if (ret)
+        transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)
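The core of the queue-full rework above is the error classification in transport_handle_queue_full(): -EAGAIN/-ENOMEM still mean "retry the same fabric callback later", while any other return code is parked as TRANSPORT_COMPLETE_QF_ERR and answered with CHECK_CONDITION instead of retrying the data-transfer callback. The following stand-alone C sketch condenses just that decision; the enum values mirror the target_core_base.h hunk further down, but the function name, harness and sample error values are illustrative only.

#include <errno.h>
#include <stdio.h>

enum t_state {
    TRANSPORT_COMPLETE_QF_WP  = 18,
    TRANSPORT_COMPLETE_QF_OK  = 19,
    TRANSPORT_COMPLETE_QF_ERR = 20,    /* new state added by this series */
};

/* Condensed model of the classification done in transport_handle_queue_full(). */
static enum t_state classify_queue_full(int err, int write_pending)
{
    if (err == -EAGAIN || err == -ENOMEM)
        return write_pending ? TRANSPORT_COMPLETE_QF_WP
                     : TRANSPORT_COMPLETE_QF_OK;
    /* unknown fabric error: retried via ->queue_status() only */
    return TRANSPORT_COMPLETE_QF_ERR;
}

int main(void)
{
    printf("%d\n", classify_queue_full(-EAGAIN, 1));    /* 18: retry write_pending */
    printf("%d\n", classify_queue_full(-EIO, 0));       /* 20: send CHECK_CONDITION */
    return 0;
}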
drivers/target/target_core_user.c

@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
            DATA_BLOCK_BITS);
 }
 
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
-        struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+                 bool bidi)
 {
+    struct se_cmd *se_cmd = cmd->se_cmd;
     int i, block;
     int block_remaining = 0;
     void *from, *to;
     size_t copy_bytes, from_offset;
-    struct scatterlist *sg;
+    struct scatterlist *sg, *data_sg;
+    unsigned int data_nents;
+    DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+    bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+    if (!bidi) {
+        data_sg = se_cmd->t_data_sg;
+        data_nents = se_cmd->t_data_nents;
+    } else {
+        uint32_t count;
+
+        /*
+         * For bidi case, the first count blocks are for Data-Out
+         * buffer blocks, and before gathering the Data-In buffer
+         * the Data-Out buffer blocks should be discarded.
+         */
+        count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+        while (count--) {
+            block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+            clear_bit(block, bitmap);
+        }
+
+        data_sg = se_cmd->t_bidi_data_sg;
+        data_nents = se_cmd->t_bidi_data_nents;
+    }
 
     for_each_sg(data_sg, sg, data_nents, i) {
         int sg_remaining = sg->length;
         to = kmap_atomic(sg_page(sg)) + sg->offset;
         while (sg_remaining > 0) {
             if (block_remaining == 0) {
-                block = find_first_bit(cmd_bitmap,
+                block = find_first_bit(bitmap,
                         DATA_BLOCK_BITS);
                 block_remaining = DATA_BLOCK_SIZE;
-                clear_bit(block, cmd_bitmap);
+                clear_bit(block, bitmap);
             }
             copy_bytes = min_t(size_t, sg_remaining,
                     block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
     return true;
 }
 
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+    struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+    size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+    if (se_cmd->se_cmd_flags & SCF_BIDI) {
+        BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+        data_length += round_up(se_cmd->t_bidi_data_sg->length,
+                DATA_BLOCK_SIZE);
+    }
+
+    return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+    size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+    return data_length / DATA_BLOCK_SIZE;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
     uint32_t cmd_head;
     uint64_t cdb_off;
     bool copy_to_data_area;
-    size_t data_length;
+    size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
     DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
     if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
      * expensive to tell how many regions are freed in the bitmap
     */
     base_command_size = max(offsetof(struct tcmu_cmd_entry,
-                req.iov[se_cmd->t_bidi_data_nents +
-                    se_cmd->t_data_nents]),
+                req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
                 sizeof(struct tcmu_cmd_entry));
     command_size = base_command_size
         + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
     mb = udev->mb_addr;
     cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-    data_length = se_cmd->data_length;
-    if (se_cmd->se_cmd_flags & SCF_BIDI) {
-        BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-        data_length += se_cmd->t_bidi_data_sg->length;
-    }
     if ((command_size > (udev->cmdr_size / 2)) ||
         data_length > udev->data_size) {
         pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
     entry->req.iov_dif_cnt = 0;
 
     /* Handle BIDI commands */
-    iov_cnt = 0;
-    alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-        se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-    entry->req.iov_bidi_cnt = iov_cnt;
-
+    if (se_cmd->se_cmd_flags & SCF_BIDI) {
+        iov_cnt = 0;
+        iov++;
+        alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+                se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+                false);
+        entry->req.iov_bidi_cnt = iov_cnt;
+    }
     /* cmd's data_bitmap is what changed in process */
     bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
             DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                    se_cmd->scsi_sense_length);
         free_data_area(udev, cmd);
     } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-        DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
         /* Get Data-In buffer before clean up */
-        bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-        gather_data_area(udev, bitmap,
-            se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+        gather_data_area(udev, cmd, true);
         free_data_area(udev, cmd);
     } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-        DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
-        bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-        gather_data_area(udev, bitmap,
-            se_cmd->t_data_sg, se_cmd->t_data_nents);
+        gather_data_area(udev, cmd, false);
         free_data_area(udev, cmd);
     } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
         free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
     if (ret < 0)
         return ret;
 
-    if (!val) {
-        pr_err("Illegal value for cmd_time_out\n");
-        return -EINVAL;
-    }
-
     udev->cmd_time_out = val * MSEC_PER_SEC;
     return count;
 }
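The TCMU BIDI fix above clears the first DIV_ROUND_UP(data_length, DATA_BLOCK_SIZE) bits of the command's data-area bitmap, i.e. the Data-Out blocks, before walking the remaining bits to gather the Data-In buffer. The following stand-alone C sketch is a toy user-space model of that skip; a single 64-bit word stands in for the kernel DECLARE_BITMAP() bitmap and the helper name is illustrative.

#include <stddef.h>
#include <stdio.h>

#define DATA_BLOCK_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Drop the leading Data-Out blocks from a toy bitmap before a Data-In gather. */
static unsigned long skip_data_out_blocks(unsigned long bitmap,
                       size_t data_out_length)
{
    size_t count = DIV_ROUND_UP(data_out_length, DATA_BLOCK_SIZE);

    while (count--)
        bitmap &= bitmap - 1;    /* clear lowest set bit == first Data-Out block */

    return bitmap;            /* remaining bits are the Data-In blocks */
}

int main(void)
{
    /* blocks 0..2 hold 9000 bytes of Data-Out, blocks 3..4 are Data-In */
    unsigned long bitmap = 0x1f;

    printf("0x%lx\n", skip_data_out_blocks(bitmap, 9000));    /* prints 0x18 */
    return 0;
}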
drivers/usb/gadget/function/f_tcm.c

@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
     usb_ep_free_request(fu->ep_in, fu->bot_req_in);
     usb_ep_free_request(fu->ep_out, fu->bot_req_out);
     usb_ep_free_request(fu->ep_out, fu->cmd.req);
-    usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+    usb_ep_free_request(fu->ep_in, fu->bot_status.req);
 
     kfree(fu->cmd.buf);
 
include/target/target_core_base.h

@@ -117,6 +117,7 @@ enum transport_state_table {
     TRANSPORT_ISTATE_PROCESSING = 11,
     TRANSPORT_COMPLETE_QF_WP = 18,
     TRANSPORT_COMPLETE_QF_OK = 19,
+    TRANSPORT_COMPLETE_QF_ERR = 20,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
     u16    tg_pt_gp_id;
     int    tg_pt_gp_valid_id;
     int    tg_pt_gp_alua_supported_states;
-    int    tg_pt_gp_alua_pending_state;
-    int    tg_pt_gp_alua_previous_state;
     int    tg_pt_gp_alua_access_status;
     int    tg_pt_gp_alua_access_type;
     int    tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
     int    tg_pt_gp_pref;
     int    tg_pt_gp_write_metadata;
     u32    tg_pt_gp_members;
-    atomic_t tg_pt_gp_alua_access_state;
+    int    tg_pt_gp_alua_access_state;
     atomic_t tg_pt_gp_ref_cnt;
     spinlock_t tg_pt_gp_lock;
-    struct mutex tg_pt_gp_md_mutex;
+    struct mutex tg_pt_gp_transition_mutex;
     struct se_device *tg_pt_gp_dev;
     struct config_group tg_pt_gp_group;
     struct list_head tg_pt_gp_list;
     struct list_head tg_pt_gp_lun_list;
     struct se_lun *tg_pt_gp_alua_lun;
     struct se_node_acl *tg_pt_gp_alua_nacl;
-    struct work_struct tg_pt_gp_transition_work;
-    struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
     u64    unpacked_lun;
 #define SE_LUN_LINK_MAGIC    0xffff7771
     u32    lun_link_magic;
+    bool    lun_shutdown;
     bool    lun_access_ro;
     u32    lun_index;
 