blk-mq-debugfs.c

/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"

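/* Print one blk_rq_stat as a human-readable "samples=..., mean=..." line. */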
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

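/*
 * q->poll_stat[] is organized by I/O size bucket: even slots hold read
 * stats, odd slots hold write stats, and bucket i covers requests of
 * 1 << (9 + i) bytes.
 */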
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

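/*
 * Print the bits set in @flags as a '|'-separated list, using @flag_name[]
 * where a name is known and falling back to the raw bit number otherwise.
 */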
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

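/*
 * The name tables below use designated initializers so that each name lands
 * at the index of its flag's bit number, which is the lookup scheme that
 * blk_flags_show() expects.
 */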
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

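/*
 * The "state" attribute is writable as well. For example, assuming debugfs
 * is mounted at its usual /sys/kernel/debug:
 *
 *   echo kick > /sys/kernel/debug/block/<dev>/state
 *
 * kicks the requeue list; "run" and "start" are the other accepted
 * operations.
 */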
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has
	 * called blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set
	 * to avoid triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;
	return count;
}

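/*
 * Attribute table entries are {name, mode, show, write}; read-only list
 * attributes set .seq_ops instead of a show/write pair.
 */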
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

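/*
 * The tag allocation policy occupies a bitfield inside hctx->flags;
 * hctx_flags_show() XORs it back out below so that only the boolean flags
 * reach blk_flags_show().
 */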
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(QUEUED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

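/*
 * Print a single request in the "<addr> {.op=..., .cmd_flags=..., ...}"
 * format shared by the dispatch, requeue_list and per-cpu rq_list
 * attributes. A driver can append its own details through the optional
 * mq_ops->show_rq() hook.
 */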
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx)
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

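/*
 * The four attributes below take q->sysfs_lock (interruptibly) because
 * hctx->tags and hctx->sched_tags can be freed and reallocated out from
 * under the reader, e.g. while the number of hardware queues is updated.
 */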
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

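/*
 * hctx->dispatched[] is a power-of-two histogram: slot 0 counts queue runs
 * that dispatched no requests, slot i counts runs that dispatched at least
 * 1 << (i - 1) requests (the printed value is the bucket's lower bound),
 * and the last slot is open-ended, printed with a trailing '+'.
 */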
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_lock(&ctx->lock);
	return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_ctx *ctx = m->private;

	return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
	__releases(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
	.start	= ctx_rq_list_start,
	.next	= ctx_rq_list_next,
	.stop	= ctx_rq_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

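/*
 * ctx->rq_dispatched[] and ctx->rq_completed[] are indexed by whether the
 * request was synchronous, so each line printed below reads "sync async".
 */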
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

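/*
 * debugfs_create_files() stores the queue/hctx/ctx pointer in the parent
 * directory's inode i_private, which is how the shared file operations
 * below recover the object a given attribute file belongs to.
 */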
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}

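/*
 * The directory hierarchy created below (with debugfs mounted at its usual
 * /sys/kernel/debug) is:
 *
 *   block/<dev>/            queue attributes
 *   block/<dev>/hctxN/      hardware queue attributes
 *   block/<dev>/hctxN/cpuM/ software (per-cpu) queue attributes
 *   .../sched/              elevator attributes, if the elevator has any
 */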
int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}