@@ -405,6 +405,8 @@ struct request_queue {
 	struct blk_mq_ctx __percpu	*queue_ctx;
 	unsigned int		nr_queues;
 
+	unsigned int		queue_depth;
+
 	/* hw dispatch queues */
 	struct blk_mq_hw_ctx	**queue_hw_ctx;
 	unsigned int		nr_hw_queues;
@@ -777,6 +779,14 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
 	return false;
 }
 
+static inline unsigned int blk_queue_depth(struct request_queue *q)
+{
+	if (q->queue_depth)
+		return q->queue_depth;
+
+	return q->nr_requests;
+}
+
 /*
  * q->prep_rq_fn return values
  */
@@ -1094,6 +1104,7 @@ extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
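
The new blk_queue_depth() helper prefers a driver-reported depth and only falls back to q->nr_requests when none was set. A minimal sketch (not part of this patch) of how a driver might feed that value in through the new blk_set_queue_depth() setter; the function name mydrv_init_queue and the depth of 64 are made-up illustrations:

	#include <linux/blkdev.h>

	/*
	 * Hypothetical driver init path: report the device's real command
	 * queue depth so that blk_queue_depth() reflects the hardware limit
	 * instead of falling back to q->nr_requests.
	 */
	static void mydrv_init_queue(struct request_queue *q)
	{
		blk_set_queue_depth(q, 64);	/* 64 is an illustrative value */
	}

After such a call, blk_queue_depth(q) returns 64; without it, the helper returns q->nr_requests.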