瀏覽代碼

Merge tag 'dm-3.15-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device-mapper fixes from Mike Snitzer:
 "A dm-cache stable fix to split discards on cache block boundaries
  because dm-cache cannot yet handle discards that span cache blocks.

  Really fix a dm-mpath LOCKDEP warning that was introduced in -rc1.

  Add a 'no_space_timeout' control to dm-thinp to restore the ability to
  queue IO indefinitely when no data space is available.  This fixes a
  change in behavior that was introduced in -rc6 where the timeout
  couldn't be disabled"

* tag 'dm-3.15-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm mpath: really fix lockdep warning
  dm cache: always split discards on cache block boundaries
  dm thin: add 'no_space_timeout' dm-thin-pool module param
Linus Torvalds 11 年之前
父節點
當前提交
24e19d279f
共有 4 個文件被更改,包括 23 次插入和 10 次刪除
  1. 4 1
      Documentation/device-mapper/thin-provisioning.txt
  2. 2 0
      drivers/md/dm-cache-target.c
  3. 8 6
      drivers/md/dm-mpath.c
  4. 9 3
      drivers/md/dm-thin.c

+ 4 - 1
Documentation/device-mapper/thin-provisioning.txt

@@ -309,7 +309,10 @@ ii) Status
     error_if_no_space|queue_if_no_space
 	If the pool runs out of data or metadata space, the pool will
 	either queue or error the IO destined to the data device.  The
-	default is to queue the IO until more space is added.
+	default is to queue the IO until more space is added or the
+	'no_space_timeout' expires.  The 'no_space_timeout' dm-thin-pool
+	module parameter can be used to change this timeout -- it
+	defaults to 60 seconds but may be disabled using a value of 0.
 
 iii) Messages
 

+ 2 - 0
drivers/md/dm-cache-target.c

@@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	ti->num_discard_bios = 1;
 	ti->discards_supported = true;
 	ti->discard_zeroes_data_unsupported = true;
+	/* Discard bios must be split on a block boundary */
+	ti->split_discard_bios = true;
 
 	cache->features = ca->features;
 	ti->per_bio_data_size = get_per_bio_data_size(cache);

+ 8 - 6
drivers/md/dm-mpath.c

@@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
 	else
 		m->saved_queue_if_no_path = queue_if_no_path;
 	m->queue_if_no_path = queue_if_no_path;
-	if (!m->queue_if_no_path)
-		dm_table_run_md_queue_async(m->ti->table);
-
 	spin_unlock_irqrestore(&m->lock, flags);
 
+	if (!queue_if_no_path)
+		dm_table_run_md_queue_async(m->ti->table);
+
 	return 0;
 }
 
@@ -954,7 +954,7 @@ out:
  */
 static int reinstate_path(struct pgpath *pgpath)
 {
-	int r = 0;
+	int r = 0, run_queue = 0;
 	unsigned long flags;
 	struct multipath *m = pgpath->pg->m;
 
@@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)
 
 	if (!m->nr_valid_paths++) {
 		m->current_pgpath = NULL;
-		dm_table_run_md_queue_async(m->ti->table);
+		run_queue = 1;
 	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
 		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
 			m->pg_init_in_progress++;
@@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)
 
 out:
 	spin_unlock_irqrestore(&m->lock, flags);
+	if (run_queue)
+		dm_table_run_md_queue_async(m->ti->table);
 
 	return r;
 }
@@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
 		}
 		if (m->pg_init_required)
 			__pg_init_all_paths(m);
-		dm_table_run_md_queue_async(m->ti->table);
 		spin_unlock_irqrestore(&m->lock, flags);
+		dm_table_run_md_queue_async(m->ti->table);
 	}
 
 	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);

+ 9 - 3
drivers/md/dm-thin.c

@@ -27,7 +27,9 @@
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
-#define NO_SPACE_TIMEOUT (HZ * 60)
+#define NO_SPACE_TIMEOUT_SECS 60
+
+static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
 		"A percentage of time allocated for copy on write");
@@ -1670,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	struct pool_c *pt = pool->ti->private;
 	bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
 	enum pool_mode old_mode = get_pool_mode(pool);
+	unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
 
 	/*
 	 * Never allow the pool to transition to PM_WRITE mode if user
@@ -1732,8 +1735,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 		pool->process_prepared_mapping = process_prepared_mapping;
 		pool->process_prepared_discard = process_prepared_discard_passdown;
 
-		if (!pool->pf.error_if_no_space)
-			queue_delayed_work(pool->wq, &pool->no_space_timeout, NO_SPACE_TIMEOUT);
+		if (!pool->pf.error_if_no_space && no_space_timeout)
+			queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
 		break;
 
 	case PM_WRITE:
@@ -3508,6 +3511,9 @@ static void dm_thin_exit(void)
 module_init(dm_thin_init);
 module_exit(dm_thin_exit);
 
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
+
 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");