
Merge tag 'for-linus-20180825' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few small fixes for this merge window:

   - Locking imbalance fix for bcache (Shan Hai)

   - A few small fixes for wbt. One is a cleanup/prep, one is a fix for
     an existing issue, and the last two are fixes for changes that went
     into this merge window (me)"

* tag 'for-linus-20180825' of git://git.kernel.dk/linux-block:
  blk-wbt: don't maintain inflight counts if disabled
  blk-wbt: fix has-sleeper queueing check
  blk-wbt: use wq_has_sleeper() for wq active check
  blk-wbt: move disable check into get_limit()
  bcache: release dc->writeback_lock properly in bch_writeback_thread()
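
Two of the wbt commits above replace waitqueue_active() with wq_has_sleeper(). As a rough userspace analogue (not the kernel code; the names below are illustrative), the difference is the full memory barrier wq_has_sleeper() issues before the check, which pairs with the barrier a task issues when it queues itself to sleep, so the waker cannot conclude "nobody is waiting" from a stale view of the wait queue:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int inflight;    /* the condition sleepers wait on */
static atomic_int nr_waiters;  /* stand-in for the wait queue population */

/* Like waitqueue_active(): a plain load with no ordering guarantee. */
static bool waiters_active(void)
{
	return atomic_load_explicit(&nr_waiters, memory_order_relaxed) > 0;
}

/* Like wq_has_sleeper(): issue a full barrier first, so updates made
 * before the call are globally visible before we look for sleepers. */
static bool waiters_have_slept(void)
{
	atomic_thread_fence(memory_order_seq_cst);
	return waiters_active();
}

/* Completion path: make progress visible, then wake if anyone sleeps.
 * Calling waiters_active() directly here would allow the check to be
 * ordered before the decrement, so a just-parked sleeper could be missed. */
static void complete_one(void (*wake_up_all)(void))
{
	atomic_fetch_sub_explicit(&inflight, 1, memory_order_relaxed);
	if (waiters_have_slept())
		wake_up_all();
}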
Linus Torvalds
commit b8dcdab36f
3 changed files with 38 additions and 22 deletions:
  block/blk-sysfs.c              | +18  -1
  block/blk-wbt.c                | +17 -20
  drivers/md/bcache/writeback.c  |  +3  -1

block/blk-sysfs.c (+18, -1)

@@ -453,9 +453,26 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 	else if (val >= 0)
 		val *= 1000ULL;
 
-	wbt_set_min_lat(q, val);
+	/*
+	 * Ensure that the queue is idled, in case the latency update
+	 * ends up either enabling or disabling wbt completely. We can't
+	 * have IO inflight if that happens.
+	 */
+	if (q->mq_ops) {
+		blk_mq_freeze_queue(q);
+		blk_mq_quiesce_queue(q);
+	} else
+		blk_queue_bypass_start(q);
 
+	wbt_set_min_lat(q, val);
 	wbt_update_limits(q);
+
+	if (q->mq_ops) {
+		blk_mq_unquiesce_queue(q);
+		blk_mq_unfreeze_queue(q);
+	} else
+		blk_queue_bypass_end(q);
+
 	return count;
 }
 

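The blk-sysfs.c change above sits behind the queue's wbt_lat_usec attribute, for which queue_wb_lat_store() is the store handler. A minimal usage sketch follows (the device path and latency value are placeholders, not part of the commit); per the handler above, a negative value such as -1 selects the default, while 0 disables wbt entirely, which is why the queue is idled around the update:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder device; adjust for the queue being tuned. */
	const char *path = "/sys/block/sda/queue/wbt_lat_usec";
	const char *val = "75000\n";	/* target latency in usec (75 ms) */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
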
block/blk-wbt.c (+17, -20)

@@ -118,7 +118,7 @@ static void rwb_wake_all(struct rq_wb *rwb)
 	for (i = 0; i < WBT_NUM_RWQ; i++) {
 		struct rq_wait *rqw = &rwb->rq_wait[i];
 
-		if (waitqueue_active(&rqw->wait))
+		if (wq_has_sleeper(&rqw->wait))
 			wake_up_all(&rqw->wait);
 	}
 }
@@ -162,7 +162,7 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
 	if (inflight && inflight >= limit)
 		return;
 
-	if (waitqueue_active(&rqw->wait)) {
+	if (wq_has_sleeper(&rqw->wait)) {
 		int diff = limit - inflight;
 
 		if (!inflight || diff >= rwb->wb_background / 2)
@@ -449,6 +449,13 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 {
 	unsigned int limit;
 
+	/*
+	 * If we got disabled, just return UINT_MAX. This ensures that
+	 * we'll properly inc a new IO, and dec+wakeup at the end.
+	 */
+	if (!rwb_enabled(rwb))
+		return UINT_MAX;
+
 	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
 		return rwb->wb_background;
 
@@ -485,31 +492,17 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
 	DECLARE_WAITQUEUE(wait, current);
+	bool has_sleeper;
 
-	/*
-	 * inc it here even if disabled, since we'll dec it at completion.
-	 * this only happens if the task was sleeping in __wbt_wait(),
-	 * and someone turned it off at the same time.
-	 */
-	if (!rwb_enabled(rwb)) {
-		atomic_inc(&rqw->inflight);
-		return;
-	}
-
-	if (!waitqueue_active(&rqw->wait)
-		&& rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+	has_sleeper = wq_has_sleeper(&rqw->wait);
+	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 		return;
 
 	add_wait_queue_exclusive(&rqw->wait, &wait);
 	do {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (!rwb_enabled(rwb)) {
-			atomic_inc(&rqw->inflight);
-			break;
-		}
-
-		if (rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+		if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 			break;
 
 		if (lock) {
@@ -518,6 +511,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 			spin_lock_irq(lock);
 		} else
 			io_schedule();
+		has_sleeper = false;
 	} while (1);
 
 	__set_current_state(TASK_RUNNING);
@@ -546,6 +540,9 @@ static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
 {
 	enum wbt_flags flags = 0;
 
+	if (!rwb_enabled(rwb))
+		return 0;
+
 	if (bio_op(bio) == REQ_OP_READ) {
 		flags = WBT_READ;
 	} else if (wbt_should_throttle(rwb, bio)) {

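For context on the get_limit() hunk above: rq_wait_inc_below() only takes an inflight slot while the count is under the limit, so having a disabled wbt report UINT_MAX keeps every request counted and lets __wbt_done() decrement and wake as usual. A userspace sketch of that pattern (illustrative names, not the kernel implementation):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Analogue of rq_wait_inc_below(): atomically take a slot iff under limit. */
static bool inc_below(atomic_uint *v, unsigned int limit)
{
	unsigned int cur = atomic_load(v);

	do {
		if (cur >= limit)
			return false;
		/* on CAS failure, cur is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(v, &cur, cur + 1));

	return true;
}

/* Analogue of get_limit() after the patch: a disabled throttle reports an
 * effectively infinite limit instead of being skipped, so the caller still
 * increments inflight and the completion side has something to undo. */
static unsigned int throttle_limit(bool enabled, unsigned int normal_limit)
{
	return enabled ? normal_limit : UINT_MAX;
}
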
drivers/md/bcache/writeback.c (+3, -1)

@@ -685,8 +685,10 @@ static int bch_writeback_thread(void *arg)
 			 * data on cache. BCACHE_DEV_DETACHING flag is set in
 			 * bch_cached_dev_detach().
 			 */
-			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+				up_write(&dc->writeback_lock);
 				break;
+			}
 		}
 
 		up_write(&dc->writeback_lock);
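
The bcache hunk closes a lock imbalance: the writer lock taken at the top of each loop iteration was only dropped on the normal path, not on the early break taken when BCACHE_DEV_DETACHING is set. A pthread-based sketch of the same shape (an analogue, not bcache code):

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t writeback_lock = PTHREAD_RWLOCK_INITIALIZER;

static void writeback_loop(bool (*should_stop)(void), bool (*detaching)(void),
			   void (*do_writeback)(void))
{
	while (!should_stop()) {
		pthread_rwlock_wrlock(&writeback_lock);

		if (detaching()) {
			/* The fix: drop the lock before leaving the loop early,
			 * mirroring the up_write() added in the hunk above. */
			pthread_rwlock_unlock(&writeback_lock);
			break;
		}

		do_writeback();
		pthread_rwlock_unlock(&writeback_lock);	/* normal-path unlock */
	}
}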