@@ -142,8 +142,12 @@ struct tcmu_dev {
 
 	struct idr commands;
 
-	struct timer_list timeout;
+	struct timer_list cmd_timer;
 	unsigned int cmd_time_out;
+
+	struct timer_list qfull_timer;
+	int qfull_time_out;
+
 	struct list_head timedout_entry;
 
 	spinlock_t nl_cmd_lock;
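
Note: the single per-device timer is split in two. cmd_timer covers commands that have been placed on the ring for userspace, while qfull_timer covers commands still waiting on the internal cmdr_queue. qfull_time_out is deliberately signed: -1 (the default set in tcmu_alloc_device() below) means fall back to cmd_time_out, 0 makes queueing fail immediately, and a positive value is a timeout stored in milliseconds.
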
@@ -741,18 +745,14 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
 	return command_size;
 }
 
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+				struct timer_list *timer)
 {
 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
-	unsigned long tmo = udev->cmd_time_out;
 	int cmd_id;
 
-	/*
-	 * If it was on the cmdr queue waiting we do not reset the timer
-	 * for requeues and when it is finally sent to userspace.
-	 */
 	if (tcmu_cmd->cmd_id)
-		return 0;
+		goto setup_timer;
 
 	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
 	if (cmd_id < 0) {
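
The helper now takes the timeout and the timer to arm as parameters, so one function services both timers. A command that already has a cmd_id is being requeued or finally handed to userspace; it skips the idr allocation and jumps straight to arming the caller's timer, replacing the old behaviour of leaving the single shared timer untouched.
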
@@ -761,23 +761,38 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
 	}
 	tcmu_cmd->cmd_id = cmd_id;
 
-	if (!tmo)
-		tmo = TCMU_TIME_OUT;
-
 	pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
 		 udev->name, tmo / MSEC_PER_SEC);
 
+setup_timer:
+	if (!tmo)
+		return 0;
+
 	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
-	mod_timer(&udev->timeout, tcmu_cmd->deadline);
+	mod_timer(timer, tcmu_cmd->deadline);
 	return 0;
 }
 
 static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
 {
 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+	unsigned int tmo;
 	int ret;
 
-	ret = tcmu_setup_cmd_timer(tcmu_cmd);
+	/*
+	 * For backwards compat if qfull_time_out is not set use
+	 * cmd_time_out and if that's not set use the default time out.
+	 */
+	if (!udev->qfull_time_out)
+		return -ETIMEDOUT;
+	else if (udev->qfull_time_out > 0)
+		tmo = udev->qfull_time_out;
+	else if (udev->cmd_time_out)
+		tmo = udev->cmd_time_out;
+	else
+		tmo = TCMU_TIME_OUT;
+
+	ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
 	if (ret)
 		return ret;
 
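
The fallback chain above is the core of the new behaviour. As a standalone illustration, here is a userspace sketch of the same selection policy; the TCMU_TIME_OUT value and the millisecond units are assumptions mirroring the driver, and pick_qfull_tmo() is a hypothetical helper, not a function from the patch:

#include <assert.h>
#include <stdio.h>

#define TCMU_TIME_OUT (30 * 1000)	/* assumed driver default, in msecs */

/* Returns the timeout to arm, or -1 to signal "fail the command now"
 * (the driver returns -ETIMEDOUT in that case). */
static int pick_qfull_tmo(int qfull_time_out, unsigned int cmd_time_out)
{
	if (!qfull_time_out)
		return -1;		/* 0: queueing disabled */
	else if (qfull_time_out > 0)
		return qfull_time_out;	/* explicit qfull timeout */
	else if (cmd_time_out)
		return cmd_time_out;	/* -1: fall back to cmd timeout */
	else
		return TCMU_TIME_OUT;	/* last resort: driver default */
}

int main(void)
{
	assert(pick_qfull_tmo(0, 5000) == -1);
	assert(pick_qfull_tmo(2000, 5000) == 2000);
	assert(pick_qfull_tmo(-1, 5000) == 5000);
	assert(pick_qfull_tmo(-1, 0) == TCMU_TIME_OUT);
	printf("qfull timeout policy ok\n");
	return 0;
}
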
@@ -901,7 +916,8 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
 	}
 	entry->req.iov_bidi_cnt = iov_cnt;
 
-	ret = tcmu_setup_cmd_timer(tcmu_cmd);
+	ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
+				   &udev->cmd_timer);
 	if (ret) {
 		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
 		mutex_unlock(&udev->cmdr_lock);
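
queue_cmd_ring() arms cmd_timer with cmd_time_out. A behaviour change hides in the refactor: a cmd_time_out of 0 used to be silently replaced with TCMU_TIME_OUT inside tcmu_setup_cmd_timer(); it now disables the inflight timer entirely via the early return at the setup_timer label.
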
@@ -1049,14 +1065,19 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 		handled++;
 	}
 
-	if (mb->cmd_tail == mb->cmd_head && list_empty(&udev->cmdr_queue)) {
-		del_timer(&udev->timeout);
-		/*
-		 * not more pending or waiting commands so try to reclaim
-		 * blocks if needed.
-		 */
-		if (atomic_read(&global_db_count) > TCMU_GLOBAL_MAX_BLOCKS)
-			schedule_delayed_work(&tcmu_unmap_work, 0);
+	if (mb->cmd_tail == mb->cmd_head) {
+		/* no more pending commands */
+		del_timer(&udev->cmd_timer);
+
+		if (list_empty(&udev->cmdr_queue)) {
+			/*
+			 * no more pending or waiting commands so try to
+			 * reclaim blocks if needed.
+			 */
+			if (atomic_read(&global_db_count) >
+			    TCMU_GLOBAL_MAX_BLOCKS)
+				schedule_delayed_work(&tcmu_unmap_work, 0);
+		}
 	}
 
 	return handled;
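
Completion handling previously required both an empty ring and an empty cmdr_queue before cancelling the shared timer. The conditions are now split: an empty ring is enough to stop cmd_timer, while block reclaim still waits until nothing is queued either.
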
@@ -1077,13 +1098,15 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 		return 0;
 
 	is_running = list_empty(&cmd->cmdr_queue_entry);
-	pr_debug("Timing out cmd %u on dev %s that is %s.\n",
-		 id, udev->name, is_running ? "inflight" : "queued");
-
-	se_cmd = cmd->se_cmd;
-	cmd->se_cmd = NULL;
 
 	if (is_running) {
+		/*
+		 * If cmd_time_out is disabled but qfull is set deadline
+		 * will only reflect the qfull timeout. Ignore it.
+		 */
+		if (!udev->cmd_time_out)
+			return 0;
+
 		set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
 		/*
 		 * target_complete_cmd will translate this to LUN COMM FAILURE
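
cmd->deadline is shared by both timers, so for an inflight command on a device with cmd_time_out disabled, any recorded deadline can only have come from the earlier qfull timer; the expiry is therefore ignored rather than failing a command the user never configured to time out.
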
@@ -1096,16 +1119,18 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 		tcmu_free_cmd(cmd);
 		scsi_status = SAM_STAT_TASK_SET_FULL;
 	}
+
+	pr_debug("Timing out cmd %u on dev %s that is %s.\n",
+		 id, udev->name, is_running ? "inflight" : "queued");
+
+	se_cmd = cmd->se_cmd;
+	cmd->se_cmd = NULL;
 	target_complete_cmd(se_cmd, scsi_status);
 	return 0;
 }
 
-static void tcmu_device_timedout(struct timer_list *t)
+static void tcmu_device_timedout(struct tcmu_dev *udev)
 {
-	struct tcmu_dev *udev = from_timer(udev, t, timeout);
-
-	pr_debug("%s cmd timeout has expired\n", udev->name);
-
 	spin_lock(&timed_out_udevs_lock);
 	if (list_empty(&udev->timedout_entry))
 		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
@@ -1114,6 +1139,22 @@ static void tcmu_device_timedout(struct timer_list *t)
 	schedule_delayed_work(&tcmu_unmap_work, 0);
 }
 
+static void tcmu_cmd_timedout(struct timer_list *t)
+{
+	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
+
+	pr_debug("%s cmd timeout has expired\n", udev->name);
+	tcmu_device_timedout(udev);
+}
+
+static void tcmu_qfull_timedout(struct timer_list *t)
+{
+	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
+
+	pr_debug("%s qfull timeout has expired\n", udev->name);
+	tcmu_device_timedout(udev);
+}
+
 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
 {
 	struct tcmu_hba *tcmu_hba;
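
Both callbacks recover the device from the embedded timer with from_timer() and then share tcmu_device_timedout(). For readers unfamiliar with the pattern, this is a minimal userspace model of the container_of() arithmetic behind from_timer(); the struct layout here is a stand-in, not the driver's real tcmu_dev:

#include <stddef.h>
#include <stdio.h>

/* Classic container_of(): recover the enclosing struct from a pointer
 * to one of its members by subtracting the member's offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_list { int armed; };

struct tcmu_dev {
	const char *name;
	struct timer_list cmd_timer;
	struct timer_list qfull_timer;
};

int main(void)
{
	struct tcmu_dev dev = { .name = "user_0" };
	struct timer_list *t = &dev.qfull_timer;

	/* What from_timer(udev, t, qfull_timer) expands to, in spirit. */
	struct tcmu_dev *udev = container_of(t, struct tcmu_dev, qfull_timer);

	printf("%s: qfull timer -> dev %s\n",
	       udev == &dev ? "recovered" : "BUG", udev->name);
	return 0;
}
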
@@ -1151,6 +1192,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 
 	udev->hba = hba;
 	udev->cmd_time_out = TCMU_TIME_OUT;
+	udev->qfull_time_out = -1;
 
 	mutex_init(&udev->cmdr_lock);
 
@@ -1158,7 +1200,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	INIT_LIST_HEAD(&udev->cmdr_queue);
 	idr_init(&udev->commands);
 
-	timer_setup(&udev->timeout, tcmu_device_timedout, 0);
+	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
+	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
 
 	init_waitqueue_head(&udev->nl_cmd_wq);
 	spin_lock_init(&udev->nl_cmd_lock);
@@ -1213,6 +1256,8 @@ static bool run_cmdr_queue(struct tcmu_dev *udev)
 			goto done;
 		}
 	}
+	if (list_empty(&udev->cmdr_queue))
+		del_timer(&udev->qfull_timer);
 done:
 	return drained;
 }
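
Once run_cmdr_queue() drains the queue it disarms qfull_timer. If it stops early through the goto done path, entries remain queued and the timer stays armed on their behalf.
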
@@ -1712,7 +1757,8 @@ static void tcmu_destroy_device(struct se_device *dev)
 {
 	struct tcmu_dev *udev = TCMU_DEV(dev);
 
-	del_timer_sync(&udev->timeout);
+	del_timer_sync(&udev->cmd_timer);
+	del_timer_sync(&udev->qfull_timer);
 
 	mutex_lock(&root_udev_mutex);
 	list_del(&udev->node);
@@ -1893,6 +1939,40 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
 }
 CONFIGFS_ATTR(tcmu_, cmd_time_out);
 
+static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
+{
+	struct se_dev_attrib *da = container_of(to_config_group(item),
+					struct se_dev_attrib, da_group);
+	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
+			udev->qfull_time_out :
+			udev->qfull_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
+					 const char *page, size_t count)
+{
+	struct se_dev_attrib *da = container_of(to_config_group(item),
+					struct se_dev_attrib, da_group);
+	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+	s32 val;
+	int ret;
+
+	ret = kstrtos32(page, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val >= 0) {
+		udev->qfull_time_out = val * MSEC_PER_SEC;
+	} else {
+		printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
+		return -EINVAL;
+	}
+	return count;
+}
+CONFIGFS_ATTR(tcmu_, qfull_time_out);
+
 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
 {
 	struct se_dev_attrib *da = container_of(to_config_group(item),
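
The attribute is written in seconds but stored in milliseconds, and the show side prints the sentinel values 0 and -1 raw rather than dividing them. Here is a userspace sketch of that round trip; qfull_show() and qfull_store() are hypothetical stand-ins for the handlers above:

#include <stdio.h>

#define MSEC_PER_SEC 1000L	/* mirrors the kernel constant */

/* Show: raw value for the sentinels (0, -1), otherwise seconds. */
static long qfull_show(long stored_msec)
{
	return stored_msec <= 0 ? stored_msec : stored_msec / MSEC_PER_SEC;
}

/* Store: input is in seconds; negatives are rejected (driver: -EINVAL). */
static int qfull_store(long *stored_msec, int val_sec)
{
	if (val_sec < 0)
		return -1;
	*stored_msec = val_sec * MSEC_PER_SEC;
	return 0;
}

int main(void)
{
	long stored = -1;	/* default from tcmu_alloc_device() */

	printf("default shows as %ld\n", qfull_show(stored));	/* -1 */
	qfull_store(&stored, 130);
	printf("130s stores as %ldms, shows as %lds\n",
	       stored, qfull_show(stored));	/* 130000, 130 */
	return 0;
}

Typical usage is simply to write a value in seconds to the device's attrib/qfull_time_out configfs file. Note that the store handler rejects negative input, so once a non-negative value has been written there is no way to restore the -1 fall-back-to-cmd_time_out default at runtime.
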
@@ -2038,6 +2118,7 @@ CONFIGFS_ATTR(tcmu_, emulate_write_cache);
 
 static struct configfs_attribute *tcmu_attrib_attrs[] = {
 	&tcmu_attr_cmd_time_out,
+	&tcmu_attr_qfull_time_out,
 	&tcmu_attr_dev_config,
 	&tcmu_attr_dev_size,
 	&tcmu_attr_emulate_write_cache,