@@ -63,6 +63,28 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
+static void blk_clear_congested(struct request_list *rl, int sync)
+{
+	if (rl != &rl->q->root_rl)
+		return;
+#ifdef CONFIG_CGROUP_WRITEBACK
+	clear_wb_congested(rl->blkg->wb_congested, sync);
+#else
+	clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+#endif
+}
+
+static void blk_set_congested(struct request_list *rl, int sync)
+{
+	if (rl != &rl->q->root_rl)
+		return;
+#ifdef CONFIG_CGROUP_WRITEBACK
+	set_wb_congested(rl->blkg->wb_congested, sync);
+#else
+	set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+#endif
+}
+
 void blk_queue_congestion_threshold(struct request_queue *q)
 {
 	int nr;
@@ -842,13 +864,8 @@ static void __freed_request(struct request_list *rl, int sync)
 {
 	struct request_queue *q = rl->q;
 
-	/*
-	 * bdi isn't aware of blkcg yet.  As all async IOs end up root
-	 * blkcg anyway, just use root blkcg state.
-	 */
-	if (rl == &q->root_rl &&
-	    rl->count[sync] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, sync);
+	if (rl->count[sync] < queue_congestion_off_threshold(q))
+		blk_clear_congested(rl, sync);
 
 	if (rl->count[sync] + 1 <= q->nr_requests) {
 		if (waitqueue_active(&rl->wait[sync]))
@@ -881,25 +898,25 @@ static void freed_request(struct request_list *rl, unsigned int flags)
 int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
 {
 	struct request_list *rl;
+	int on_thresh, off_thresh;
 
 	spin_lock_irq(q->queue_lock);
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
+	on_thresh = queue_congestion_on_threshold(q);
+	off_thresh = queue_congestion_off_threshold(q);
 
-	/* congestion isn't cgroup aware and follows root blkcg for now */
-	rl = &q->root_rl;
-
-	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, BLK_RW_SYNC);
-	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, BLK_RW_SYNC);
+	blk_queue_for_each_rl(rl, q) {
+		if (rl->count[BLK_RW_SYNC] >= on_thresh)
+			blk_set_congested(rl, BLK_RW_SYNC);
+		else if (rl->count[BLK_RW_SYNC] < off_thresh)
+			blk_clear_congested(rl, BLK_RW_SYNC);
 
-	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, BLK_RW_ASYNC);
-	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, BLK_RW_ASYNC);
+		if (rl->count[BLK_RW_ASYNC] >= on_thresh)
+			blk_set_congested(rl, BLK_RW_ASYNC);
+		else if (rl->count[BLK_RW_ASYNC] < off_thresh)
+			blk_clear_congested(rl, BLK_RW_ASYNC);
 
-	blk_queue_for_each_rl(rl, q) {
 		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
 			blk_set_rl_full(rl, BLK_RW_SYNC);
 		} else {
@@ -1009,12 +1026,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 				}
 			}
 		}
-		/*
-		 * bdi isn't aware of blkcg yet.  As all async IOs end up
-		 * root blkcg anyway, just use root blkcg state.
-		 */
-		if (rl == &q->root_rl)
-			blk_set_queue_congested(q, is_sync);
+		blk_set_congested(rl, is_sync);
 	}
 
 	/*
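
For readers without the surrounding tree, the following is a minimal userspace sketch (not kernel code) of the pattern the hunks above establish: the congestion set/clear helpers take the request_list itself and decide internally which writeback-congestion state to flip, so call sites such as __freed_request(), blk_update_nr_requests(), and __get_request() no longer open-code the root_rl check or the per-queue helpers. The struct layouts, field names, the MODEL_CGROUP_WRITEBACK macro, and the set_congested()/clear_congested() names below are hypothetical stand-ins, not the kernel's.

/* Hedged sketch: a hypothetical userspace model of blk_{set|clear}_congested(). */
#include <stdbool.h>
#include <stdio.h>

struct request_queue;

struct wb_congested {
	bool state[2];			/* [0] = async, [1] = sync */
};

struct request_list {
	struct request_queue *q;	/* owning queue */
	struct wb_congested *wb;	/* per-group state (cgroup writeback case) */
	int count[2];
};

struct request_queue {
	struct request_list root_rl;	/* the queue's root request list */
	struct wb_congested bdi_wb;	/* single per-bdi state (non-cgroup case) */
};

/* Mirrors the shape of blk_set_congested(): the helper, not the call site,
 * decides which congestion state a given request_list maps to. */
static void set_congested(struct request_list *rl, int sync)
{
	if (rl != &rl->q->root_rl)	/* only the root list drives the flag */
		return;
#ifdef MODEL_CGROUP_WRITEBACK
	rl->wb->state[sync] = true;	/* per-group writeback domain */
#else
	rl->q->bdi_wb.state[sync] = true;	/* whole-bdi state */
#endif
}

static void clear_congested(struct request_list *rl, int sync)
{
	if (rl != &rl->q->root_rl)
		return;
#ifdef MODEL_CGROUP_WRITEBACK
	rl->wb->state[sync] = false;
#else
	rl->q->bdi_wb.state[sync] = false;
#endif
}

int main(void)
{
	/* Built without MODEL_CGROUP_WRITEBACK, so the per-bdi path runs. */
	static struct request_queue q;	/* zero-initialized */

	q.root_rl.q = &q;
	set_congested(&q.root_rl, 1);	/* congest the sync direction */
	printf("sync congested: %d\n", q.bdi_wb.state[1]);
	clear_congested(&q.root_rl, 1);
	printf("sync congested: %d\n", q.bdi_wb.state[1]);
	return 0;
}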