
Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A set of fixes that should go into the next -rc. This contains:

   - A use-after-free in the request_list exit for the legacy IO path,
     from Bart.

   - A fix for CFQ, fixing a recent regression with the conversion to
     higher resolution timing for iops mode. From Hou Tao.

   - A single fix for nbd, split into two patches, fixing a leak of the
     nbd_config structure.

   - A regression fix from Keith, ensuring that callers of
     blk_mq_update_nr_hw_queues() hold the right lock"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: Avoid that blk_exit_rl() triggers a use-after-free
  cfq-iosched: fix the delay of cfq_group's vdisktime under iops mode
  blk-mq: Take tagset lock when updating hw queues
  nbd: don't leak nbd_config
  nbd: nbd_reset() call in nbd_dev_add() is redundant
Linus Torvalds, 8 years ago
commit bb329859ef

+ 1 - 1
block/blk-cgroup.c

@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg)
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
 	if (blkg->blkcg != &blkcg_root)
-		blk_exit_rl(&blkg->rl);
+		blk_exit_rl(blkg->q, &blkg->rl);
 
 	blkg_rwstat_exit(&blkg->stat_ios);
 	blkg_rwstat_exit(&blkg->stat_bytes);

+ 8 - 2
block/blk-core.c

@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
+	if (rl != &q->root_rl)
+		WARN_ON_ONCE(!blk_get_queue(q));
+
 	return 0;
 }
 
-void blk_exit_rl(struct request_list *rl)
+void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 {
-	if (rl->rq_pool)
+	if (rl->rq_pool) {
 		mempool_destroy(rl->rq_pool);
+		if (rl != &q->root_rl)
+			blk_put_queue(q);
+	}
 }
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
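
The essence of Bart's fix, as the two hunks above suggest, is lifetime pinning: a non-root request_list now takes a reference on its request_queue in blk_init_rl() and drops it in blk_exit_rl(), so the queue cannot be freed while a per-blkcg request_list that points into it still exists. The root_rl is excluded because it is embedded in the queue itself; pinning the queue from its own member would create a self-reference that keeps the queue alive forever. Below is a minimal userspace sketch of the same pattern, with illustrative names (queue, request_list, rl_init, rl_exit) rather than the kernel's actual helpers:

/*
 * Illustrative only: a sub-object pins its owner with a reference while
 * it exists, so tearing the sub-object down later never touches freed
 * memory.
 */
#include <stdio.h>
#include <stdlib.h>

struct queue {
	int refcount;
};

struct request_list {
	struct queue *q;	/* owner, pinned while the list exists */
};

static struct queue *queue_get(struct queue *q)
{
	q->refcount++;
	return q;
}

static void queue_put(struct queue *q)
{
	if (--q->refcount == 0) {
		printf("queue freed\n");
		free(q);
	}
}

static void rl_init(struct request_list *rl, struct queue *q)
{
	rl->q = queue_get(q);	/* pin the owner, as blk_init_rl() now does */
}

static void rl_exit(struct request_list *rl)
{
	queue_put(rl->q);	/* drop the pin, as blk_exit_rl() now does */
}

int main(void)
{
	struct queue *q = calloc(1, sizeof(*q));
	struct request_list rl;

	q->refcount = 1;	/* the creator's reference */
	rl_init(&rl, q);
	queue_put(q);		/* creator is done; queue survives via rl */
	rl_exit(&rl);		/* last reference dropped, queue freed here */
	return 0;
}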

+ 9 - 1
block/blk-mq.c

@@ -2641,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return ret;
 }
 
-void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+							int nr_hw_queues)
 {
 	struct request_queue *q;
 
@@ -2665,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
 }
+
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+	mutex_lock(&set->tag_list_lock);
+	__blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+	mutex_unlock(&set->tag_list_lock);
+}
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
 /* Enable polling stats and return whether they were already enabled. */
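
Keith's fix follows the usual kernel convention of splitting a function into an internal, lock-assuming helper (the double-underscore name) and an exported wrapper that takes the lock: __blk_mq_update_nr_hw_queues() walks set->tag_list, which is protected by set->tag_list_lock, and the exported blk_mq_update_nr_hw_queues() now acquires that mutex itself so every caller gets the locking right. A minimal userspace sketch of the convention, with made-up names (tag_set, update_queues) standing in for the real structures:

/*
 * Illustrative only: the exported entry point takes the lock; the
 * double-underscore helper assumes it is already held.
 */
#include <pthread.h>
#include <stdio.h>

struct tag_set {
	pthread_mutex_t lock;	/* protects nr_queues (stands in for tag_list) */
	int nr_queues;
};

/* Caller must hold set->lock, mirroring __blk_mq_update_nr_hw_queues(). */
static void __update_queues(struct tag_set *set, int nr)
{
	set->nr_queues = nr;
	printf("updated to %d queues\n", set->nr_queues);
}

/* Public entry point: takes the lock so callers don't have to. */
static void update_queues(struct tag_set *set, int nr)
{
	pthread_mutex_lock(&set->lock);
	__update_queues(set, nr);
	pthread_mutex_unlock(&set->lock);
}

int main(void)
{
	struct tag_set set = { .lock = PTHREAD_MUTEX_INITIALIZER, .nr_queues = 1 };

	update_queues(&set, 4);
	return 0;
}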

+ 1 - 1
block/blk-sysfs.c

@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj)
 
 	blk_free_queue_stats(q->stats);
 
-	blk_exit_rl(&q->root_rl);
+	blk_exit_rl(q, &q->root_rl);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);

+ 1 - 1
block/blk.h

@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask);
-void blk_exit_rl(struct request_list *rl);
+void blk_exit_rl(struct request_queue *q, struct request_list *rl);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 			struct bio *bio);
 void blk_queue_bypass_start(struct request_queue *q);

+ 15 - 2
block/cfq-iosched.c

@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
  */
 #define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 
 /*
  * below this threshold, we consider thinktime immediate
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 	cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
 
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+	if (!iops_mode(cfqd))
+		return CFQ_SLICE_MODE_GROUP_DELAY;
+	else
+		return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	n = rb_last(&st->rb);
 	if (n) {
 		__cfqg = rb_entry_cfqg(n);
-		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+		cfqg->vdisktime = __cfqg->vdisktime +
+			cfq_get_cfqg_vdisktime_delay(cfqd);
 	} else
 		cfqg->vdisktime = st->min_vdisktime;
 	cfq_group_service_tree_add(st, cfqg);
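
A rough sense of the scale mismatch this hunk corrects: before CFQ's conversion to nanosecond timing, this group delay was HZ / 5 jiffies; the conversion turned it into NSEC_PER_SEC / 5 = 200,000,000. That is proportionate when vdisktime advances in nanosecond-sized slice charges, but under iops mode a group's vdisktime is charged per dispatched request, in far smaller units, so placing a newly added group roughly 2 * 10^8 units behind the last group on the tree could delay its dispatch almost indefinitely. CFQ_IOPS_MODE_GROUP_DELAY restores the pre-conversion jiffy scale for that case (for example, 200 with HZ = 1000), while slice mode keeps the nanosecond-scaled offset.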

+ 5 - 10
drivers/block/nbd.c

@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 	return -ENOSPC;
 }
 
-/* Reset all properties of an NBD device */
-static void nbd_reset(struct nbd_device *nbd)
-{
-	nbd->config = NULL;
-	nbd->tag_set.timeout = 0;
-	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-}
-
 static void nbd_bdev_reset(struct block_device *bdev)
 {
 	if (bdev->bd_openers > 1)
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd)
 			}
 			kfree(config->socks);
 		}
-		nbd_reset(nbd);
+		kfree(nbd->config);
+		nbd->config = NULL;
+
+		nbd->tag_set.timeout = 0;
+		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index)
 	disk->fops = &nbd_fops;
 	disk->private_data = nbd;
 	sprintf(disk->disk_name, "nbd%d", index);
-	nbd_reset(nbd);
 	add_disk(disk);
 	nbd_total_devices++;
 	return index;
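
Taken together, the two nbd patches close one leak: nbd->config points to a separately allocated nbd_config, and the old nbd_reset() merely cleared the pointer, so the allocation was lost every time the last reference was dropped in nbd_config_put(). The first patch frees the structure before clearing the pointer. With that, the remaining nbd_reset() call in nbd_dev_add() does nothing useful, since the nbd_device is freshly zero-allocated at that point (config is already NULL, tag_set.timeout is already 0) and the newly created disk queue has not had QUEUE_FLAG_DISCARD set yet, so the second patch simply removes it.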