|
@@ -534,9 +534,6 @@ void transport_deregister_session(struct se_session *se_sess)
|
|
|
}
|
|
|
EXPORT_SYMBOL(transport_deregister_session);
|
|
|
|
|
|
-/*
|
|
|
- * Called with cmd->t_state_lock held.
|
|
|
- */
|
|
|
static void target_remove_from_state_list(struct se_cmd *cmd)
|
|
|
{
|
|
|
struct se_device *dev = cmd->se_dev;
|
|
@@ -561,10 +558,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
|
|
|
- spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
- if (write_pending)
|
|
|
- cmd->t_state = TRANSPORT_WRITE_PENDING;
|
|
|
-
|
|
|
if (remove_from_lists) {
|
|
|
target_remove_from_state_list(cmd);
|
|
|
|
|
@@ -574,6 +567,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
|
|
|
cmd->se_lun = NULL;
|
|
|
}
|
|
|
|
|
|
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ if (write_pending)
|
|
|
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
|
|
|
+
|
|
|
/*
|
|
|
* Determine if frontend context caller is requesting the stopping of
|
|
|
* this command for frontend exceptions.
|
|
@@ -627,6 +624,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
|
|
|
|
|
|
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
|
|
|
{
|
|
|
+ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
|
|
|
+
|
|
|
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
|
|
|
transport_lun_remove_cmd(cmd);
|
|
|
/*
|
|
@@ -638,7 +637,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
|
|
|
|
|
|
if (transport_cmd_check_stop_to_fabric(cmd))
|
|
|
return;
|
|
|
- if (remove)
|
|
|
+ if (remove && ack_kref)
|
|
|
transport_put_cmd(cmd);
|
|
|
}
|
|
|
|
|
@@ -693,20 +692,11 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
|
|
|
success = 1;
|
|
|
}
|
|
|
|
|
|
- /*
|
|
|
- * See if we are waiting to complete for an exception condition.
|
|
|
- */
|
|
|
- if (cmd->transport_state & CMD_T_REQUEST_STOP) {
|
|
|
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
- complete(&cmd->task_stop_comp);
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
/*
|
|
|
* Check for case where an explicit ABORT_TASK has been received
|
|
|
* and transport_wait_for_tasks() will be waiting for completion..
|
|
|
*/
|
|
|
- if (cmd->transport_state & CMD_T_ABORTED &&
|
|
|
+ if (cmd->transport_state & CMD_T_ABORTED ||
|
|
|
cmd->transport_state & CMD_T_STOP) {
|
|
|
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
complete_all(&cmd->t_transport_stop_comp);
|
|
@@ -721,10 +711,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
|
|
|
cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
|
|
|
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
|
|
|
- if (cmd->cpuid == -1)
|
|
|
- queue_work(target_completion_wq, &cmd->work);
|
|
|
- else
|
|
|
+ if (cmd->se_cmd_flags & SCF_USE_CPUID)
|
|
|
queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
|
|
|
+ else
|
|
|
+ queue_work(target_completion_wq, &cmd->work);
|
|
|
}
|
|
|
EXPORT_SYMBOL(target_complete_cmd);
|
|
|
|
|
@@ -1203,7 +1193,6 @@ void transport_init_se_cmd(
|
|
|
INIT_LIST_HEAD(&cmd->state_list);
|
|
|
init_completion(&cmd->t_transport_stop_comp);
|
|
|
init_completion(&cmd->cmd_wait_comp);
|
|
|
- init_completion(&cmd->task_stop_comp);
|
|
|
spin_lock_init(&cmd->t_state_lock);
|
|
|
kref_init(&cmd->cmd_kref);
|
|
|
cmd->transport_state = CMD_T_DEV_ACTIVE;
|
|
@@ -1437,6 +1426,12 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
|
|
|
*/
|
|
|
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
|
|
|
data_length, data_dir, task_attr, sense);
|
|
|
+
|
|
|
+ if (flags & TARGET_SCF_USE_CPUID)
|
|
|
+ se_cmd->se_cmd_flags |= SCF_USE_CPUID;
|
|
|
+ else
|
|
|
+ se_cmd->cpuid = WORK_CPU_UNBOUND;
|
|
|
+
|
|
|
if (flags & TARGET_SCF_UNKNOWN_SIZE)
|
|
|
se_cmd->unknown_data_length = 1;
|
|
|
/*
|
|
@@ -1634,33 +1629,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
|
|
|
}
|
|
|
EXPORT_SYMBOL(target_submit_tmr);
|
|
|
|
|
|
-/*
|
|
|
- * If the cmd is active, request it to be stopped and sleep until it
|
|
|
- * has completed.
|
|
|
- */
|
|
|
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
|
|
|
- __releases(&cmd->t_state_lock)
|
|
|
- __acquires(&cmd->t_state_lock)
|
|
|
-{
|
|
|
- bool was_active = false;
|
|
|
-
|
|
|
- if (cmd->transport_state & CMD_T_BUSY) {
|
|
|
- cmd->transport_state |= CMD_T_REQUEST_STOP;
|
|
|
- spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
|
|
|
-
|
|
|
- pr_debug("cmd %p waiting to complete\n", cmd);
|
|
|
- wait_for_completion(&cmd->task_stop_comp);
|
|
|
- pr_debug("cmd %p stopped successfully\n", cmd);
|
|
|
-
|
|
|
- spin_lock_irqsave(&cmd->t_state_lock, *flags);
|
|
|
- cmd->transport_state &= ~CMD_T_REQUEST_STOP;
|
|
|
- cmd->transport_state &= ~CMD_T_BUSY;
|
|
|
- was_active = true;
|
|
|
- }
|
|
|
-
|
|
|
- return was_active;
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* Handle SAM-esque emulation for generic transport request failures.
|
|
|
*/
|
|
@@ -1859,19 +1827,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
+static int __transport_check_aborted_status(struct se_cmd *, int);
|
|
|
+
|
|
|
void target_execute_cmd(struct se_cmd *cmd)
|
|
|
{
|
|
|
- /*
|
|
|
- * If the received CDB has aleady been aborted stop processing it here.
|
|
|
- */
|
|
|
- if (transport_check_aborted_status(cmd, 1))
|
|
|
- return;
|
|
|
-
|
|
|
/*
|
|
|
* Determine if frontend context caller is requesting the stopping of
|
|
|
* this command for frontend exceptions.
|
|
|
+ *
|
|
|
+	 * If the received CDB has already been aborted stop processing it here.
|
|
|
*/
|
|
|
spin_lock_irq(&cmd->t_state_lock);
|
|
|
+ if (__transport_check_aborted_status(cmd, 1)) {
|
|
|
+ spin_unlock_irq(&cmd->t_state_lock);
|
|
|
+ return;
|
|
|
+ }
|
|
|
if (cmd->transport_state & CMD_T_STOP) {
|
|
|
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
|
|
|
__func__, __LINE__, cmd->tag);
|
|
@@ -2222,20 +2192,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * transport_release_cmd - free a command
|
|
|
- * @cmd: command to free
|
|
|
+ * transport_put_cmd - release a reference to a command
|
|
|
+ * @cmd: command to release
|
|
|
*
|
|
|
- * This routine unconditionally frees a command, and reference counting
|
|
|
- * or list removal must be done in the caller.
|
|
|
+ * This routine releases our reference to the command and frees it if possible.
|
|
|
*/
|
|
|
-static int transport_release_cmd(struct se_cmd *cmd)
|
|
|
+static int transport_put_cmd(struct se_cmd *cmd)
|
|
|
{
|
|
|
BUG_ON(!cmd->se_tfo);
|
|
|
-
|
|
|
- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
|
|
|
- core_tmr_release_req(cmd->se_tmr_req);
|
|
|
- if (cmd->t_task_cdb != cmd->__t_task_cdb)
|
|
|
- kfree(cmd->t_task_cdb);
|
|
|
/*
|
|
|
* If this cmd has been setup with target_get_sess_cmd(), drop
|
|
|
* the kref and call ->release_cmd() in kref callback.
|
|
@@ -2243,18 +2207,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
|
|
|
return target_put_sess_cmd(cmd);
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * transport_put_cmd - release a reference to a command
|
|
|
- * @cmd: command to release
|
|
|
- *
|
|
|
- * This routine releases our reference to the command and frees it if possible.
|
|
|
- */
|
|
|
-static int transport_put_cmd(struct se_cmd *cmd)
|
|
|
-{
|
|
|
- transport_free_pages(cmd);
|
|
|
- return transport_release_cmd(cmd);
|
|
|
-}
|
|
|
-
|
|
|
void *transport_kmap_data_sg(struct se_cmd *cmd)
|
|
|
{
|
|
|
struct scatterlist *sg = cmd->t_data_sg;
|
|
@@ -2450,34 +2402,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
|
|
|
+static bool
|
|
|
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
|
|
|
+ unsigned long *flags);
|
|
|
+
|
|
|
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
|
|
|
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+}
|
|
|
+
|
|
|
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
|
|
|
+{
|
|
|
int ret = 0;
|
|
|
+ bool aborted = false, tas = false;
|
|
|
|
|
|
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
|
|
|
if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
|
|
|
- transport_wait_for_tasks(cmd);
|
|
|
+ target_wait_free_cmd(cmd, &aborted, &tas);
|
|
|
|
|
|
- ret = transport_release_cmd(cmd);
|
|
|
+ if (!aborted || tas)
|
|
|
+ ret = transport_put_cmd(cmd);
|
|
|
} else {
|
|
|
if (wait_for_tasks)
|
|
|
- transport_wait_for_tasks(cmd);
|
|
|
+ target_wait_free_cmd(cmd, &aborted, &tas);
|
|
|
/*
|
|
|
* Handle WRITE failure case where transport_generic_new_cmd()
|
|
|
* has already added se_cmd to state_list, but fabric has
|
|
|
* failed command before I/O submission.
|
|
|
*/
|
|
|
- if (cmd->state_active) {
|
|
|
- spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ if (cmd->state_active)
|
|
|
target_remove_from_state_list(cmd);
|
|
|
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
- }
|
|
|
|
|
|
if (cmd->se_lun)
|
|
|
transport_lun_remove_cmd(cmd);
|
|
|
|
|
|
- ret = transport_put_cmd(cmd);
|
|
|
+ if (!aborted || tas)
|
|
|
+ ret = transport_put_cmd(cmd);
|
|
|
+ }
|
|
|
+ /*
|
|
|
+ * If the task has been internally aborted due to TMR ABORT_TASK
|
|
|
+ * or LUN_RESET, target_core_tmr.c is responsible for performing
|
|
|
+ * the remaining calls to target_put_sess_cmd(), and not the
|
|
|
+ * callers of this function.
|
|
|
+ */
|
|
|
+ if (aborted) {
|
|
|
+ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
|
|
|
+ wait_for_completion(&cmd->cmd_wait_comp);
|
|
|
+ cmd->se_tfo->release_cmd(cmd);
|
|
|
+ ret = 1;
|
|
|
}
|
|
|
return ret;
|
|
|
}
|
|
@@ -2517,26 +2493,46 @@ out:
|
|
|
}
|
|
|
EXPORT_SYMBOL(target_get_sess_cmd);
|
|
|
|
|
|
+static void target_free_cmd_mem(struct se_cmd *cmd)
|
|
|
+{
|
|
|
+ transport_free_pages(cmd);
|
|
|
+
|
|
|
+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
|
|
|
+ core_tmr_release_req(cmd->se_tmr_req);
|
|
|
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
|
|
|
+ kfree(cmd->t_task_cdb);
|
|
|
+}
|
|
|
+
|
|
|
static void target_release_cmd_kref(struct kref *kref)
|
|
|
{
|
|
|
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
|
|
|
struct se_session *se_sess = se_cmd->se_sess;
|
|
|
unsigned long flags;
|
|
|
+ bool fabric_stop;
|
|
|
|
|
|
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
|
|
|
if (list_empty(&se_cmd->se_cmd_list)) {
|
|
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
|
|
+ target_free_cmd_mem(se_cmd);
|
|
|
se_cmd->se_tfo->release_cmd(se_cmd);
|
|
|
return;
|
|
|
}
|
|
|
- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
|
|
|
+
|
|
|
+ spin_lock(&se_cmd->t_state_lock);
|
|
|
+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
|
|
|
+ spin_unlock(&se_cmd->t_state_lock);
|
|
|
+
|
|
|
+ if (se_cmd->cmd_wait_set || fabric_stop) {
|
|
|
+ list_del_init(&se_cmd->se_cmd_list);
|
|
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
|
|
+ target_free_cmd_mem(se_cmd);
|
|
|
complete(&se_cmd->cmd_wait_comp);
|
|
|
return;
|
|
|
}
|
|
|
- list_del(&se_cmd->se_cmd_list);
|
|
|
+ list_del_init(&se_cmd->se_cmd_list);
|
|
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
|
|
|
|
|
+ target_free_cmd_mem(se_cmd);
|
|
|
se_cmd->se_tfo->release_cmd(se_cmd);
|
|
|
}
|
|
|
|
|
@@ -2548,6 +2544,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
|
|
|
struct se_session *se_sess = se_cmd->se_sess;
|
|
|
|
|
|
if (!se_sess) {
|
|
|
+ target_free_cmd_mem(se_cmd);
|
|
|
se_cmd->se_tfo->release_cmd(se_cmd);
|
|
|
return 1;
|
|
|
}
|
|
@@ -2564,6 +2561,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
|
|
|
{
|
|
|
struct se_cmd *se_cmd;
|
|
|
unsigned long flags;
|
|
|
+ int rc;
|
|
|
|
|
|
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
|
|
|
if (se_sess->sess_tearing_down) {
|
|
@@ -2573,8 +2571,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
|
|
|
se_sess->sess_tearing_down = 1;
|
|
|
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
|
|
|
|
|
|
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
|
|
|
- se_cmd->cmd_wait_set = 1;
|
|
|
+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
|
|
|
+ rc = kref_get_unless_zero(&se_cmd->cmd_kref);
|
|
|
+ if (rc) {
|
|
|
+ se_cmd->cmd_wait_set = 1;
|
|
|
+ spin_lock(&se_cmd->t_state_lock);
|
|
|
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
|
|
|
+ spin_unlock(&se_cmd->t_state_lock);
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
|
|
|
}
|
|
@@ -2587,15 +2592,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
|
|
|
{
|
|
|
struct se_cmd *se_cmd, *tmp_cmd;
|
|
|
unsigned long flags;
|
|
|
+ bool tas;
|
|
|
|
|
|
list_for_each_entry_safe(se_cmd, tmp_cmd,
|
|
|
&se_sess->sess_wait_list, se_cmd_list) {
|
|
|
- list_del(&se_cmd->se_cmd_list);
|
|
|
+ list_del_init(&se_cmd->se_cmd_list);
|
|
|
|
|
|
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
|
|
|
" %d\n", se_cmd, se_cmd->t_state,
|
|
|
se_cmd->se_tfo->get_cmd_state(se_cmd));
|
|
|
|
|
|
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
|
|
|
+ tas = (se_cmd->transport_state & CMD_T_TAS);
|
|
|
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
|
|
|
+
|
|
|
+ if (!target_put_sess_cmd(se_cmd)) {
|
|
|
+ if (tas)
|
|
|
+ target_put_sess_cmd(se_cmd);
|
|
|
+ }
|
|
|
+
|
|
|
wait_for_completion(&se_cmd->cmd_wait_comp);
|
|
|
pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
|
|
|
" fabric state: %d\n", se_cmd, se_cmd->t_state,
|
|
@@ -2617,53 +2632,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
|
|
|
wait_for_completion(&lun->lun_ref_comp);
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * transport_wait_for_tasks - wait for completion to occur
|
|
|
- * @cmd: command to wait
|
|
|
- *
|
|
|
- * Called from frontend fabric context to wait for storage engine
|
|
|
- * to pause and/or release frontend generated struct se_cmd.
|
|
|
- */
|
|
|
-bool transport_wait_for_tasks(struct se_cmd *cmd)
|
|
|
+static bool
|
|
|
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
|
|
|
+ bool *aborted, bool *tas, unsigned long *flags)
|
|
|
+ __releases(&cmd->t_state_lock)
|
|
|
+ __acquires(&cmd->t_state_lock)
|
|
|
{
|
|
|
- unsigned long flags;
|
|
|
|
|
|
- spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ assert_spin_locked(&cmd->t_state_lock);
|
|
|
+ WARN_ON_ONCE(!irqs_disabled());
|
|
|
+
|
|
|
+ if (fabric_stop)
|
|
|
+ cmd->transport_state |= CMD_T_FABRIC_STOP;
|
|
|
+
|
|
|
+ if (cmd->transport_state & CMD_T_ABORTED)
|
|
|
+ *aborted = true;
|
|
|
+
|
|
|
+ if (cmd->transport_state & CMD_T_TAS)
|
|
|
+ *tas = true;
|
|
|
+
|
|
|
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
|
|
|
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
|
|
|
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
|
|
|
return false;
|
|
|
- }
|
|
|
|
|
|
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
|
|
|
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
|
|
|
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
|
|
|
return false;
|
|
|
- }
|
|
|
|
|
|
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
|
|
|
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+ if (!(cmd->transport_state & CMD_T_ACTIVE))
|
|
|
+ return false;
|
|
|
+
|
|
|
+ if (fabric_stop && *aborted)
|
|
|
return false;
|
|
|
- }
|
|
|
|
|
|
cmd->transport_state |= CMD_T_STOP;
|
|
|
|
|
|
- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
|
|
|
- cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
|
|
|
+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
|
|
|
+ " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
|
|
|
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
|
|
|
|
|
|
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
|
|
|
|
|
|
wait_for_completion(&cmd->t_transport_stop_comp);
|
|
|
|
|
|
- spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
|
|
|
cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
|
|
|
|
|
|
- pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
|
|
|
- cmd->tag);
|
|
|
+ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
|
|
|
+ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
|
|
|
|
|
|
+ return true;
|
|
|
+}
|
|
|
+
|
|
|
+/**
|
|
|
+ * transport_wait_for_tasks - wait for completion to occur
|
|
|
+ * @cmd: command to wait
|
|
|
+ *
|
|
|
+ * Called from frontend fabric context to wait for storage engine
|
|
|
+ * to pause and/or release frontend generated struct se_cmd.
|
|
|
+ */
|
|
|
+bool transport_wait_for_tasks(struct se_cmd *cmd)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+ bool ret, aborted = false, tas = false;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
|
|
|
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
|
|
|
- return true;
|
|
|
+ return ret;
|
|
|
}
|
|
|
EXPORT_SYMBOL(transport_wait_for_tasks);
|
|
|
|
|
@@ -2845,28 +2882,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
|
|
|
}
|
|
|
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
|
|
|
|
|
|
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
|
|
|
+static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
|
|
|
+ __releases(&cmd->t_state_lock)
|
|
|
+ __acquires(&cmd->t_state_lock)
|
|
|
{
|
|
|
+ assert_spin_locked(&cmd->t_state_lock);
|
|
|
+ WARN_ON_ONCE(!irqs_disabled());
|
|
|
+
|
|
|
if (!(cmd->transport_state & CMD_T_ABORTED))
|
|
|
return 0;
|
|
|
-
|
|
|
/*
|
|
|
* If cmd has been aborted but either no status is to be sent or it has
|
|
|
* already been sent, just return
|
|
|
*/
|
|
|
- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
|
|
|
+ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
|
|
|
+ if (send_status)
|
|
|
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
|
|
|
return 1;
|
|
|
+ }
|
|
|
|
|
|
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
|
|
|
- cmd->t_task_cdb[0], cmd->tag);
|
|
|
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
|
|
|
+ " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
|
|
|
|
|
|
cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
|
|
|
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
|
|
|
trace_target_cmd_complete(cmd);
|
|
|
+
|
|
|
+ spin_unlock_irq(&cmd->t_state_lock);
|
|
|
cmd->se_tfo->queue_status(cmd);
|
|
|
+ spin_lock_irq(&cmd->t_state_lock);
|
|
|
|
|
|
return 1;
|
|
|
}
|
|
|
+
|
|
|
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
|
|
|
+{
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ spin_lock_irq(&cmd->t_state_lock);
|
|
|
+ ret = __transport_check_aborted_status(cmd, send_status);
|
|
|
+ spin_unlock_irq(&cmd->t_state_lock);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
EXPORT_SYMBOL(transport_check_aborted_status);
|
|
|
|
|
|
void transport_send_task_abort(struct se_cmd *cmd)
|
|
@@ -2888,11 +2946,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
|
|
|
*/
|
|
|
if (cmd->data_direction == DMA_TO_DEVICE) {
|
|
|
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
|
|
|
- cmd->transport_state |= CMD_T_ABORTED;
|
|
|
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
|
|
|
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+ goto send_abort;
|
|
|
+ }
|
|
|
cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
|
|
|
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
return;
|
|
|
}
|
|
|
}
|
|
|
+send_abort:
|
|
|
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
|
|
|
|
|
|
transport_lun_remove_cmd(cmd);
|
|
@@ -2909,8 +2973,17 @@ static void target_tmr_work(struct work_struct *work)
|
|
|
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
|
|
|
struct se_device *dev = cmd->se_dev;
|
|
|
struct se_tmr_req *tmr = cmd->se_tmr_req;
|
|
|
+ unsigned long flags;
|
|
|
int ret;
|
|
|
|
|
|
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ if (cmd->transport_state & CMD_T_ABORTED) {
|
|
|
+ tmr->response = TMR_FUNCTION_REJECTED;
|
|
|
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+ goto check_stop;
|
|
|
+ }
|
|
|
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+
|
|
|
switch (tmr->function) {
|
|
|
case TMR_ABORT_TASK:
|
|
|
core_tmr_abort_task(dev, tmr, cmd->se_sess);
|
|
@@ -2943,9 +3016,17 @@ static void target_tmr_work(struct work_struct *work)
|
|
|
break;
|
|
|
}
|
|
|
|
|
|
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
|
|
|
+ if (cmd->transport_state & CMD_T_ABORTED) {
|
|
|
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+ goto check_stop;
|
|
|
+ }
|
|
|
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
|
|
|
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
|
|
|
+
|
|
|
cmd->se_tfo->queue_tm_rsp(cmd);
|
|
|
|
|
|
+check_stop:
|
|
|
transport_cmd_check_stop_to_fabric(cmd);
|
|
|
}
|
|
|
|