@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-		struct se_device *dev);
+		struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
 		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
 			transport_write_pending_qf(cmd);
-		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
 			transport_complete_qf(cmd);
 	}
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 		}
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		goto check_stop;
 	default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	}
 
 	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
 	return;
 
 queue_full:
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
 	int ret = 0;
 
 	transport_complete_task_attr(cmd);
+	/*
+	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
+	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
+	 * the same callbacks should not be retried. Return CHECK_CONDITION
+	 * if a scsi_status is not already set.
+	 *
+	 * If a fabric driver ->queue_status() has returned non-zero, always
+	 * keep retrying no matter what.
+	 */
+	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+		if (cmd->scsi_status)
+			goto queue_status;
 
-	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-		trace_target_cmd_complete(cmd);
-		ret = cmd->se_tfo->queue_status(cmd);
-		goto out;
+		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+		cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+		cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
+		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+		goto queue_status;
 	}
 
+	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+		goto queue_status;
+
 	switch (cmd->data_direction) {
 	case DMA_FROM_DEVICE:
 		if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
 		break;
 	}
 
-out:
 	if (ret < 0) {
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 		return;
 	}
 	transport_lun_remove_cmd(cmd);
 	transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-	struct se_cmd *cmd,
-	struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+					int err, bool write_pending)
 {
+	/*
+	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+	 * ->queue_data_in() callbacks from new process context.
+	 *
+	 * Otherwise for other errors, transport_complete_qf() will send
+	 * CHECK_CONDITION via ->queue_status() instead of attempting to
+	 * retry associated fabric driver data-transfer callbacks.
+	 */
+	if (err == -EAGAIN || err == -ENOMEM) {
+		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+						 TRANSPORT_COMPLETE_QF_OK;
+	} else {
+		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+	}
+
 	spin_lock_irq(&dev->qf_cmd_lock);
 	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
 	atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
 		WARN_ON(!cmd->scsi_status);
 		ret = transport_send_check_condition_and_sense(
 					cmd, 0, 1);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 
 		transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
 	} else if (rc) {
 		ret = transport_send_check_condition_and_sense(cmd,
 				rc, 0);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 
 		transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
 		if (target_read_prot_action(cmd)) {
 			ret = transport_send_check_condition_and_sense(cmd,
 						cmd->pi_err, 0);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 
 			transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
 
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_data_in(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
 			atomic_long_add(cmd->data_length,
 					&cmd->se_lun->lun_stats.tx_data_octets);
 			ret = cmd->se_tfo->queue_data_in(cmd);
-			if (ret == -EAGAIN || ret == -ENOMEM)
+			if (ret)
 				goto queue_full;
 			break;
 		}
@@ -2166,7 +2197,7 @@ queue_rsp:
 queue_status:
 		trace_target_cmd_complete(cmd);
 		ret = cmd->se_tfo->queue_status(cmd);
-		if (ret == -EAGAIN || ret == -ENOMEM)
+		if (ret)
 			goto queue_full;
 		break;
 	default:
@@ -2180,8 +2211,8 @@ queue_status:
 queue_full:
 	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
 		" data_direction: %d\n", cmd, cmd->data_direction);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM)
+	if (ret)
 		goto queue_full;
 
-	/* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-	WARN_ON(ret);
-
-	return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	return 0;
 
 queue_full:
 	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-	transport_handle_queue_full(cmd, cmd->se_dev);
+	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
 	int ret;
 
 	ret = cmd->se_tfo->write_pending(cmd);
-	if (ret == -EAGAIN || ret == -ENOMEM) {
+	if (ret) {
 		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
 			 cmd);
-		transport_handle_queue_full(cmd, cmd->se_dev);
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
 	}
 }
 
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	__releases(&cmd->t_state_lock)
 	__acquires(&cmd->t_state_lock)
 {
+	int ret;
+
 	assert_spin_locked(&cmd->t_state_lock);
 	WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
 	trace_target_cmd_complete(cmd);
 
 	spin_unlock_irq(&cmd->t_state_lock);
-	cmd->se_tfo->queue_status(cmd);
+	ret = cmd->se_tfo->queue_status(cmd);
+	if (ret)
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 	spin_lock_irq(&cmd->t_state_lock);
 
 	return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
 		 cmd->t_task_cdb[0], cmd->tag);
 
 	trace_target_cmd_complete(cmd);
-	cmd->se_tfo->queue_status(cmd);
+	ret = cmd->se_tfo->queue_status(cmd);
+	if (ret)
+		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)