@@ -888,6 +888,19 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
 	kblockd_schedule_work(&q->timeout_work);
 }
 
+/**
+ * blk_alloc_queue_node - allocate a request queue
+ * @gfp_mask: memory allocation flags
+ * @node_id: NUMA node to allocate memory from
+ * @lock: For legacy queues, pointer to a spinlock that is used e.g. to
+ *	serialize calls to the legacy .request_fn() callback. Ignored for
+ *	blk-mq request queues.
+ *
+ * Note: pass the queue lock as the third argument to this function instead of
+ * setting the queue lock pointer explicitly, to avoid triggering a sporadic
+ * crash in the blkcg code: this function calls blkcg_init_queue(), and the
+ * queue lock pointer must already be set when blkcg_init_queue() runs.
+ */
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 					   spinlock_t *lock)
 {
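
As a usage illustration (not part of the patch; the mydrv_* identifiers are
hypothetical): a legacy, non-blk-mq caller now hands its queue lock to the
allocator, while a blk-mq caller passes NULL since the argument is ignored
for it.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(mydrv_lock);

static int mydrv_create_queues(void)
{
	struct request_queue *legacy_q, *mq_q;

	/* Legacy caller: queue_lock is set before blkcg_init_queue() runs. */
	legacy_q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &mydrv_lock);

	/* blk-mq caller: the third argument is ignored, so pass NULL. */
	mq_q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);

	return legacy_q && mq_q ? 0 : -ENOMEM;
}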
@@ -940,11 +953,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 	mutex_init(&q->sysfs_lock);
 	spin_lock_init(&q->__queue_lock);
 
-	/*
-	 * By default initialize queue_lock to internal lock and driver can
-	 * override it later if need be.
-	 */
-	q->queue_lock = &q->__queue_lock;
+	if (!q->mq_ops)
+		q->queue_lock = lock ? : &q->__queue_lock;
 
 	/*
 	 * A queue starts its life with bypass turned on to avoid
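
Note on the conditional above: "lock ? : &q->__queue_lock" uses the GNU C
extension that omits the middle operand, evaluating to lock when lock is
non-NULL and to &q->__queue_lock otherwise. Spelled out (a clarifying
rewrite, not part of the patch):

	if (!q->mq_ops)
		q->queue_lock = lock ? lock : &q->__queue_lock;

Blk-mq queues (q->mq_ops set) are left untouched, matching the "Ignored for
blk-mq request queues" wording in the new kernel-doc.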
@@ -1031,13 +1041,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
 	struct request_queue *q;
 
-	q = blk_alloc_queue_node(GFP_KERNEL, node_id, NULL);
+	q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock);
 	if (!q)
 		return NULL;
 
 	q->request_fn = rfn;
-	if (lock)
-		q->queue_lock = lock;
 	if (blk_init_allocated_queue(q) < 0) {
 		blk_cleanup_queue(q);
 		return NULL;
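
Finally, a sketch (not part of the patch) of how the lock now flows through
blk_init_queue_node() for a legacy driver; the mydrv_* identifiers are
hypothetical:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(mydrv_lock);

/* Legacy request handler; the block layer calls it with mydrv_lock held. */
static void mydrv_request_fn(struct request_queue *q)
{
	/* dequeue and process requests from q here */
}

static struct request_queue *mydrv_init_queue(void)
{
	/*
	 * The lock is forwarded to blk_alloc_queue_node() and is therefore
	 * set before blkcg_init_queue() runs; the old pattern of assigning
	 * q->queue_lock after this call is no longer needed.
	 */
	return blk_init_queue_node(mydrv_request_fn, &mydrv_lock,
				   NUMA_NO_NODE);
}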