@@ -1335,6 +1335,15 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	hctx_unlock(hctx, srcu_idx);
 }
 
+static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
+{
+	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		cpu = cpumask_first(hctx->cpumask);
+	return cpu;
+}
+
 /*
  * It'd be great if the workqueue API had a way to pass
  * in a mask and had some smarts for more clever placement.
@@ -1354,14 +1363,7 @@ select_cpu:
 		next_cpu = cpumask_next_and(next_cpu, hctx->cpumask,
 				cpu_online_mask);
 		if (next_cpu >= nr_cpu_ids)
-			next_cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
-
-		/*
-		 * No online CPU is found, so have to make sure hctx->next_cpu
-		 * is set correctly for not breaking workqueue.
-		 */
-		if (next_cpu >= nr_cpu_ids)
-			next_cpu = cpumask_first(hctx->cpumask);
+			next_cpu = blk_mq_first_mapped_cpu(hctx);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
 
@@ -2430,10 +2432,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		/*
 		 * Initialize batch roundrobin counts
 		 */
-		hctx->next_cpu = cpumask_first_and(hctx->cpumask,
-				cpu_online_mask);
-		if (hctx->next_cpu >= nr_cpu_ids)
-			hctx->next_cpu = cpumask_first(hctx->cpumask);
+		hctx->next_cpu = blk_mq_first_mapped_cpu(hctx);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
 }