blk-mq.h

#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;
struct blk_flush_queue;

struct blk_mq_cpu_notifier {
	struct list_head	list;
	void			*data;
	int			(*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_ctxmap {
	unsigned int		map_size;
	unsigned int		bits_per_word;
	struct blk_align_bitmap	*map;
};
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	struct blk_flush_queue	*fq;

	void			*driver_data;

	struct blk_mq_ctxmap	ctx_map;

	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	atomic_t		wait_index;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		queue_num;

	atomic_t		nr_active;

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};
struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;	/* max hw supported */
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
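
/*
 * Illustrative sketch (not part of the original header): a driver would
 * typically fill out one blk_mq_tag_set at probe time and register it
 * before creating the queue. "my_mq_ops" and "struct my_cmd" below are
 * hypothetical driver-side names.
 *
 *	static struct blk_mq_tag_set my_set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.numa_node	= NUMA_NO_NODE,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 *
 *	ret = blk_mq_alloc_tag_set(&my_set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(&my_set);
 *	if (IS_ERR(q))
 *		goto out_free_tag_set;
 */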
struct blk_mq_queue_data {
	struct request *rq;
	struct list_head *list;
	bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);
typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
		bool);
struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	timeout_fn		*timeout;

	softirq_done_fn		*complete;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 *
	 * A tag greater than or equal to queue_depth is used for setting
	 * up the flush request.
	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
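
/*
 * Illustrative sketch (not part of the original header): a minimal ops
 * table only needs the queue_rq and map_queue hooks; blk_mq_map_queue(),
 * declared below, provides the default mapping used by simple drivers,
 * and my_queue_rq() is a hypothetical driver function.
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *	};
 */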
enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_TAG_SHARED	= 1 << 1,
	BLK_MQ_F_SG_MERGE	= 1 << 2,
	BLK_MQ_F_SYSFS_UP	= 1 << 3,
	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 10240,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
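
/*
 * Illustrative sketch (not part of the original header) of how a
 * ->queue_rq hook reports the BLK_MQ_RQ_QUEUE_* codes back to the block
 * layer: returning BUSY makes the core requeue and retry the request
 * later. my_hw_submit() is hypothetical.
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *
 *		blk_mq_start_request(rq);
 *		if (my_hw_submit(rq) < 0)
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */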
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
		gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
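
/*
 * Illustrative sketch (not part of the original header, error handling may
 * differ by kernel version): allocating a request directly, outside the
 * normal bio submission path, e.g. for a driver-internal command:
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 */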
enum {
	BLK_MQ_UNIQUE_TAG_BITS = 16,
	BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
	return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}

static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
	return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
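
/*
 * Example (illustrative, not part of the original header): the two helpers
 * above decode the cookie built by blk_mq_unique_tag(), so both halves can
 * be recovered from a single 32-bit value:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	(upper 16 bits)
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	(lower 16 bits)
 */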
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
		void *priv);
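
/*
 * Illustrative sketch (not part of the original header): a busy_iter_fn
 * callback is invoked once per in-flight request, e.g. to abort everything
 * still outstanding on a hardware context; my_abort_rq() is hypothetical.
 *
 *	static void my_busy_iter(struct blk_mq_hw_ctx *hctx,
 *				 struct request *rq, void *priv, bool reserved)
 *	{
 *		my_abort_rq(rq);
 *	}
 *
 *	blk_mq_tag_busy_iter(hctx, my_busy_iter, NULL);
 */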
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}
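
/*
 * Illustrative sketch (not part of the original header): with
 * .cmd_size = sizeof(struct my_cmd) in the tag_set, every request carries
 * a driver PDU directly behind it, and the two helpers above convert in
 * both directions; struct my_cmd is hypothetical.
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */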
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
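
/*
 * Illustrative sketch (not part of the original header): the iterators
 * above walk every hardware or software context of a queue, e.g.:
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		blk_mq_stop_hw_queue(hctx);
 */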
#endif