blk-stat.c

/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

/* min starts at all ones (-1ULL) so the first sample always becomes the new minimum */
static void blk_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/*
 * src is a per-cpu stat whose mean isn't initialized; its batch field holds
 * the raw sum of its samples, so the merged mean is recomputed from that sum.
 */
static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
			    dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

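/*
 * Worked example of the merge above (illustrative numbers only): if dst has
 * nr_samples = 2 with mean = 10 (a reconstructed sum of 20) and src has
 * batch = 30 accumulated over nr_samples = 3, the new mean is
 * (30 + 10 * 2) / (2 + 3) = 10, and dst->nr_samples becomes 5.
 */
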
static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

/*
 * Called at request completion: compute how long the request took since it
 * was issued and add that value to the matching bucket of every active
 * callback, on the local CPU.
 */
void blk_stat_add(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket;
	u64 now, value;

	now = __blk_stat_time(ktime_to_ns(ktime_get()));
	if (now < blk_stat_time(&rq->issue_stat))
		return;

	value = now - blk_stat_time(&rq->issue_stat);

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
		__blk_stat_add(stat, value);
		put_cpu_ptr(cb->cpu_stat);
	}
	rcu_read_unlock();
}

/*
 * Timer callback: fold the per-cpu buckets into cb->stat[], reset the per-cpu
 * state for the next window, and hand the aggregated result to the owner's
 * timer_fn.
 */
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}
EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);

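/*
 * Illustrative sketch only (not part of this file): a consumer supplies its
 * own bucket_fn and timer_fn when allocating a callback. The names below are
 * hypothetical; bucket_fn maps a request to a bucket index (or a negative
 * value to skip it), and timer_fn consumes the aggregated cb->stat[] window.
 *
 *	static int example_bucket_fn(const struct request *rq)
 *	{
 *		// bucket 0 for reads, bucket 1 for everything else
 *		return req_op(rq) == REQ_OP_READ ? 0 : 1;
 *	}
 *
 *	static void example_timer_fn(struct blk_stat_callback *cb)
 *	{
 *		// cb->stat[0] and cb->stat[1] now hold the latest window
 *	}
 */
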
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_stat_init(&cpu_stat[bucket]);
	}

	spin_lock(&q->stats->lock);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}
EXPORT_SYMBOL_GPL(blk_stat_add_callback);

void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	spin_lock(&q->stats->lock);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);

	del_timer_sync(&cb->timer);
}
EXPORT_SYMBOL_GPL(blk_stat_remove_callback);

static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
EXPORT_SYMBOL_GPL(blk_stat_free_callback);

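/*
 * Putting the pieces together (illustrative sketch only; example_bucket_fn
 * and example_timer_fn are the hypothetical callbacks sketched above, and
 * error handling is elided):
 *
 *	struct blk_stat_callback *cb;
 *
 *	cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
 *				     2, NULL);
 *	if (cb)
 *		blk_stat_add_callback(q, cb);
 *
 *	// ... arm cb->timer (e.g. via the activation helpers declared in
 *	// blk-stat.h) so blk_stat_timer_fn() periodically aggregates the
 *	// per-cpu buckets ...
 *
 *	blk_stat_remove_callback(q, cb);
 *	blk_stat_free_callback(cb);
 */
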
void blk_stat_enable_accounting(struct request_queue *q)
{
	spin_lock(&q->stats->lock);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock(&q->stats->lock);
}

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}