diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 98b12a8923f0..ed0b1ff99110 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -66,15 +66,16 @@ void core_tmr_release_req(
 	struct se_tmr_req *tmr)
 {
 	struct se_device *dev = tmr->tmr_dev;
+	unsigned long flags;
 
 	if (!dev) {
 		kmem_cache_free(se_tmr_req_cache, tmr);
 		return;
 	}
 
-	spin_lock_irq(&dev->se_tmr_lock);
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
 	list_del(&tmr->tmr_list);
-	spin_unlock_irq(&dev->se_tmr_lock);
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
 
 	kmem_cache_free(se_tmr_req_cache, tmr);
 }
@@ -99,54 +100,20 @@ static void core_tmr_handle_tas_abort(
 	transport_cmd_finish_abort(cmd, 0);
 }
 
-int core_tmr_lun_reset(
+static void core_tmr_drain_tmr_list(
 	struct se_device *dev,
 	struct se_tmr_req *tmr,
-	struct list_head *preempt_and_abort_list,
-	struct se_cmd *prout_cmd)
+	struct list_head *preempt_and_abort_list)
 {
-	struct se_cmd *cmd, *tcmd;
-	struct se_node_acl *tmr_nacl = NULL;
-	struct se_portal_group *tmr_tpg = NULL;
-	struct se_queue_obj *qobj = &dev->dev_queue_obj;
+	LIST_HEAD(drain_tmr_list);
 	struct se_tmr_req *tmr_p, *tmr_pp;
-	struct se_task *task, *task_tmp;
+	struct se_cmd *cmd;
 	unsigned long flags;
-	int fe_count, tas;
-	/*
-	 * TASK_ABORTED status bit, this is configurable via ConfigFS
-	 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
-	 *
-	 * A task aborted status (TAS) bit set to zero specifies that aborted
-	 * tasks shall be terminated by the device server without any response
-	 * to the application client. A TAS bit set to one specifies that tasks
-	 * aborted by the actions of an I_T nexus other than the I_T nexus on
-	 * which the command was received shall be completed with TASK ABORTED
-	 * status (see SAM-4).
-	 */
-	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
-	/*
-	 * Determine if this se_tmr is coming from a $FABRIC_MOD
-	 * or struct se_device passthrough..
-	 */
-	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
-		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
-		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
-		if (tmr_nacl && tmr_tpg) {
-			pr_debug("LUN_RESET: TMR caller fabric: %s"
-				" initiator port %s\n",
-				tmr_tpg->se_tpg_tfo->get_fabric_name(),
-				tmr_nacl->initiatorname);
-		}
-	}
-	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
-		(preempt_and_abort_list) ? "Preempt" : "TMR",
-		dev->transport->name, tas);
 	/*
 	 * Release all pending and outgoing TMRs aside from the received
 	 * LUN_RESET tmr..
 	 */
-	spin_lock_irq(&dev->se_tmr_lock);
+	spin_lock_irqsave(&dev->se_tmr_lock, flags);
 	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
 		/*
 		 * Allow the received TMR to return with FUNCTION_COMPLETE.
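The core_tmr_release_req() hunk swaps the plain _irq lock variants for _irqsave/_irqrestore. The difference matters once a function can be entered with interrupts already disabled: spin_unlock_irq() unconditionally re-enables interrupts, while the irqsave pair restores whatever state the caller had. A minimal sketch of the distinction, using hypothetical names (demo_lock, demo_op) rather than anything from this patch:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_op(void)
	{
		unsigned long flags;

		/* Record the current IRQ state in flags, then disable IRQs. */
		spin_lock_irqsave(&demo_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&demo_lock, flags);
		/*
		 * IRQs are now exactly as the caller left them; a
		 * spin_unlock_irq() here would have re-enabled them even
		 * for a caller running with IRQs disabled.
		 */
	}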
@@ -168,29 +135,48 @@ int core_tmr_lun_reset(
 		    (core_scsi3_check_cdb_abort_and_preempt(
 					preempt_and_abort_list, cmd) != 0))
 			continue;
-		spin_unlock_irq(&dev->se_tmr_lock);
 
-		spin_lock_irqsave(&cmd->t_state_lock, flags);
+		spin_lock(&cmd->t_state_lock);
 		if (!atomic_read(&cmd->t_transport_active)) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			spin_lock_irq(&dev->se_tmr_lock);
+			spin_unlock(&cmd->t_state_lock);
 			continue;
 		}
 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-			spin_lock_irq(&dev->se_tmr_lock);
+			spin_unlock(&cmd->t_state_lock);
 			continue;
 		}
+		spin_unlock(&cmd->t_state_lock);
+
+		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+	}
+	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+
+	while (!list_empty(&drain_tmr_list)) {
+		tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
+		list_del(&tmr->tmr_list);
+		cmd = tmr->task_cmd;
+
 		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
 			" Response: 0x%02x, t_state: %d\n",
-			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
-			tmr_p->function, tmr_p->response, cmd->t_state);
-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+			(preempt_and_abort_list) ? "Preempt" : "", tmr,
+			tmr->function, tmr->response, cmd->t_state);
 
 		transport_cmd_finish_abort_tmr(cmd);
-		spin_lock_irq(&dev->se_tmr_lock);
 	}
-	spin_unlock_irq(&dev->se_tmr_lock);
+}
+
+static void core_tmr_drain_task_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_node_acl *tmr_nacl,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_task_list);
+	struct se_cmd *cmd;
+	struct se_task *task, *task_tmp;
+	unsigned long flags;
+	int fe_count;
 	/*
 	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
 	 * This is following sam4r17, section 5.6 Aborting commands, Table 38
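core_tmr_drain_tmr_list() above is the first of three helpers split out of core_tmr_lun_reset(), and all of them use the same two-pass drain idiom: walk the shared list once under its lock, detach every entry to be aborted onto an on-stack list via list_move_tail(), drop the lock, then complete the detached entries with no lock held. This replaces the old pattern of releasing and re-acquiring the lock inside the loop for each entry. A self-contained sketch of the idiom, with hypothetical demo_req/demo_drain() names standing in for the target-core types:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_req {
		struct list_head list;
	};

	static void demo_drain(struct list_head *pending, spinlock_t *lock)
	{
		LIST_HEAD(drain_list);		/* private, on-stack list head */
		struct demo_req *req, *tmp;
		unsigned long flags;

		/* Pass 1: detach entries to abort while holding the lock. */
		spin_lock_irqsave(lock, flags);
		list_for_each_entry_safe(req, tmp, pending, list)
			list_move_tail(&req->list, &drain_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * Pass 2: drain_list is reachable only from this stack
		 * frame, so its entries can be completed without the lock
		 * and without the old unlock/relock dance in the loop.
		 */
		while (!list_empty(&drain_list)) {
			req = list_first_entry(&drain_list, struct demo_req, list);
			list_del(&req->list);
			/* ... finish or abort req here ... */
		}
	}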
@@ -235,9 +221,23 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		list_del(&task->t_state_list);
+		list_move_tail(&task->t_state_list, &drain_task_list);
 		atomic_set(&task->task_state_active, 0);
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+		/*
+		 * Remove from task execute list before processing drain_task_list
+		 */
+		if (atomic_read(&task->task_execute_queue) != 0) {
+			list_del(&task->t_execute_list);
+			atomic_set(&task->task_execute_queue, 0);
+			atomic_dec(&dev->execute_tasks);
+		}
+	}
+	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+
+	while (!list_empty(&drain_task_list)) {
+		task = list_entry(drain_task_list.next, struct se_task, t_state_list);
+		list_del(&task->t_state_list);
+		cmd = task->task_se_cmd;
 
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		pr_debug("LUN_RESET: %s cmd: %p task: %p"
@@ -274,20 +274,14 @@ int core_tmr_lun_reset(
 
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
-		} else {
-			if (atomic_read(&task->task_execute_queue) != 0)
-				transport_remove_task_from_execute_queue(task, dev);
 		}
 		__transport_stop_task_timer(task, &flags);
 
 		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
-			spin_unlock_irqrestore(
-					&cmd->t_state_lock, flags);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
 				atomic_read(&cmd->t_task_cdbs_ex_left));
-
-			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
 		fe_count = atomic_read(&cmd->t_fe_count);
@@ -297,22 +291,31 @@ int core_tmr_lun_reset(
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
 			atomic_set(&cmd->t_transport_aborted, 1);
-			spin_unlock_irqrestore(&cmd->t_state_lock,
-						flags);
-			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-			spin_lock_irqsave(&dev->execute_task_lock, flags);
+			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 			continue;
 		}
 		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
 		atomic_set(&cmd->t_transport_aborted, 1);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 
-		spin_lock_irqsave(&dev->execute_task_lock, flags);
+		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
 	}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+}
+
+static void core_tmr_drain_cmd_list(
+	struct se_device *dev,
+	struct se_cmd *prout_cmd,
+	struct se_node_acl *tmr_nacl,
+	int tas,
+	struct list_head *preempt_and_abort_list)
+{
+	LIST_HEAD(drain_cmd_list);
+	struct se_queue_obj *qobj = &dev->dev_queue_obj;
+	struct se_cmd *cmd, *tcmd;
+	unsigned long flags;
+
 	/*
 	 * Release all commands remaining in the struct se_device cmd queue.
 	 *
@@ -337,10 +340,15 @@ int core_tmr_lun_reset(
 		if (prout_cmd == cmd)
 			continue;
 
-		atomic_dec(&cmd->t_transport_queue_active);
+		atomic_set(&cmd->t_transport_queue_active, 0);
 		atomic_dec(&qobj->queue_cnt);
+		list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
+	}
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	while (!list_empty(&drain_cmd_list)) {
+		cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
 		list_del_init(&cmd->se_queue_node);
-		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
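The task-drain hunks also fold the execute-queue removal under dev->execute_task_lock, in place of the transport_remove_task_from_execute_queue() call in the old else branch, so a task leaves both lists before the lock is dropped. The task_execute_queue atomic serves as a membership flag ensuring the task is unlinked exactly once. A sketch of that flag-guarded dequeue, with hypothetical demo_* names:

	#include <linux/atomic.h>
	#include <linux/list.h>

	struct demo_task {
		struct list_head	exec_list;
		atomic_t		on_exec_queue;	/* 1 while linked on the execute list */
	};

	/* Caller must hold the lock that protects the execute list. */
	static void demo_dequeue_locked(struct demo_task *t, atomic_t *nr_queued)
	{
		if (atomic_read(&t->on_exec_queue) != 0) {
			list_del(&t->exec_list);
			atomic_set(&t->on_exec_queue, 0);
			atomic_dec(nr_queued);	/* device-wide count of queued tasks */
		}
	}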
@@ -353,9 +361,53 @@ int core_tmr_lun_reset(
 		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
 				atomic_read(&cmd->t_fe_count));
-
-		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	}
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+}
+
+int core_tmr_lun_reset(
+	struct se_device *dev,
+	struct se_tmr_req *tmr,
+	struct list_head *preempt_and_abort_list,
+	struct se_cmd *prout_cmd)
+{
+	struct se_node_acl *tmr_nacl = NULL;
+	struct se_portal_group *tmr_tpg = NULL;
+	int tas;
+
+	/*
+	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+	 * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
+	 *
+	 * A task aborted status (TAS) bit set to zero specifies that aborted
+	 * tasks shall be terminated by the device server without any response
+	 * to the application client. A TAS bit set to one specifies that tasks
+	 * aborted by the actions of an I_T nexus other than the I_T nexus on
+	 * which the command was received shall be completed with TASK ABORTED
+	 * status (see SAM-4).
+	 */
+	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
+	/*
+	 * Determine if this se_tmr is coming from a $FABRIC_MOD
+	 * or struct se_device passthrough..
+	 */
+	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+		if (tmr_nacl && tmr_tpg) {
+			pr_debug("LUN_RESET: TMR caller fabric: %s"
+				" initiator port %s\n",
+				tmr_tpg->se_tpg_tfo->get_fabric_name(),
+				tmr_nacl->initiatorname);
+		}
+	}
+	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
+		(preempt_and_abort_list) ? "Preempt" : "TMR",
+		dev->transport->name, tas);
+
+	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+	core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
+	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
+				preempt_and_abort_list);
 	/*
 	 * Clear any legacy SPC-2 reservation when called during
 	 * LOGICAL UNIT RESET
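With the split complete, core_tmr_lun_reset() is reduced to the TAS setup plus three drain calls in a fixed order: pending TMRs first, then in-flight tasks, then commands still waiting on the device queue, so nothing lands back on a list that has already been drained. The call shapes below are a hedged reconstruction of the two callers in this era of the code, not part of the patch itself:

	/* TMR path: abort everything on the device; no preempt list,
	 * no PR OUT command to protect. */
	core_tmr_lun_reset(dev, tmr, NULL, NULL);

	/* PERSISTENT RESERVE OUT Preempt-and-Abort path: abort only the
	 * CDBs named on the preempt list, and never the PR OUT command
	 * (prout_cmd) that triggered the abort. */
	core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, prout_cmd);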