/* blk-rq-qos.h — block layer request-queue QoS policy infrastructure */
  1. #ifndef RQ_QOS_H
  2. #define RQ_QOS_H
  3. #include <linux/kernel.h>
  4. #include <linux/blkdev.h>
  5. #include <linux/blk_types.h>
  6. #include <linux/atomic.h>
  7. #include <linux/wait.h>
/*
 * Identifies which QoS policy a struct rq_qos instance implements; used
 * by rq_qos_id() below to look a policy up on a request_queue.
 */
enum rq_qos_id {
	RQ_QOS_WBT,	/* writeback throttling (see wbt_rq_qos()) */
	RQ_QOS_CGROUP,	/* cgroup-based I/O control (see blkcg_rq_qos()) */
};
/*
 * A wait queue paired with an in-flight request counter; initialized by
 * rq_wait_init() and gated by rq_wait_inc_below().
 */
struct rq_wait {
	wait_queue_head_t wait;
	atomic_t inflight;	/* requests currently outstanding */
};
/*
 * One QoS policy attached to a request_queue.  Policies form a singly
 * linked list headed at q->rq_qos, maintained by rq_qos_add() and
 * rq_qos_del().
 */
struct rq_qos {
	struct rq_qos_ops *ops;		/* policy callbacks */
	struct request_queue *q;	/* owning queue */
	enum rq_qos_id id;		/* which policy this is */
	struct rq_qos *next;		/* next policy on the queue's chain */
};
/*
 * Per-policy callback table.  NOTE(review): the exact call points are in
 * blk-rq-qos.c / the block core, not visible here — the per-hook notes
 * below are inferred from the dispatcher names (rq_qos_throttle() etc.)
 * declared at the bottom of this header; confirm against the callers.
 */
struct rq_qos_ops {
	/* called from rq_qos_throttle() when a bio is submitted */
	enum wbt_flags (*throttle)(struct rq_qos *, struct bio *,
				   spinlock_t *);
	/* called from rq_qos_issue() when a request is dispatched */
	void (*issue)(struct rq_qos *, struct request *);
	/* called from rq_qos_requeue() when a request is requeued */
	void (*requeue)(struct rq_qos *, struct request *);
	/* called from rq_qos_done() on request completion */
	void (*done)(struct rq_qos *, struct request *);
	/* called from rq_qos_cleanup() with the flags throttle() returned */
	void (*cleanup)(struct rq_qos *, enum wbt_flags);
	/* called from rq_qos_exit() when the queue is torn down */
	void (*exit)(struct rq_qos *);
};
/*
 * Scalable queue-depth state, adjusted by rq_depth_scale_up()/_down()
 * and recomputed by rq_depth_calc_max_depth().
 */
struct rq_depth {
	unsigned int max_depth;		/* current effective depth limit */

	int scale_step;			/* current scaling position */
	bool scaled_max;		/* true once scaled up to the maximum */

	unsigned int queue_depth;	/* depth advertised by the device */
	unsigned int default_depth;	/* depth to fall back to */
};
  38. static inline struct rq_qos *rq_qos_id(struct request_queue *q,
  39. enum rq_qos_id id)
  40. {
  41. struct rq_qos *rqos;
  42. for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
  43. if (rqos->id == id)
  44. break;
  45. }
  46. return rqos;
  47. }
  48. static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
  49. {
  50. return rq_qos_id(q, RQ_QOS_WBT);
  51. }
  52. static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
  53. {
  54. return rq_qos_id(q, RQ_QOS_CGROUP);
  55. }
  56. static inline void rq_wait_init(struct rq_wait *rq_wait)
  57. {
  58. atomic_set(&rq_wait->inflight, 0);
  59. init_waitqueue_head(&rq_wait->wait);
  60. }
/*
 * Link @rqos at the head of @q's QoS policy chain.  @rqos->next must be
 * set before the head pointer so the list is never observed broken.
 * NOTE(review): no locking is visible here — presumably callers
 * serialize chain modification (e.g. during queue setup); confirm at
 * the call sites.
 */
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
}
  66. static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
  67. {
  68. struct rq_qos *cur, *prev = NULL;
  69. for (cur = q->rq_qos; cur; cur = cur->next) {
  70. if (cur == rqos) {
  71. if (prev)
  72. prev->next = rqos->next;
  73. else
  74. q->rq_qos = cur;
  75. break;
  76. }
  77. prev = cur;
  78. }
  79. }
/* rq_wait / rq_depth helpers — implemented in blk-rq-qos.c. */
bool rq_wait_inc_below(struct rq_wait *rq_wait, int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);

/*
 * Dispatchers that walk q->rq_qos and invoke the matching rq_qos_ops
 * callback on each registered policy — implemented in blk-rq-qos.c.
 */
void rq_qos_cleanup(struct request_queue *, enum wbt_flags);
void rq_qos_done(struct request_queue *, struct request *);
void rq_qos_issue(struct request_queue *, struct request *);
void rq_qos_requeue(struct request_queue *, struct request *);
enum wbt_flags rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
void rq_qos_exit(struct request_queue *);
  90. #endif