blk-mq-debugfs.c

/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
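
/*
 * Helpers that expose blk-mq state through debugfs: per-queue attributes
 * (poll_stat, requeue_list, state, ...), per-hctx attributes (dispatch list,
 * tags, counters, ...) and per-ctx attributes, all created under the
 * per-queue directory in the block debugfs root.
 */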

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS / 2; bucket++) {
		seq_printf(m, "read  (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};
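
/*
 * Print the bits that are set in @flags: named bits by name, separated by
 * '|', and any bit without an entry in @flag_name by its bit number.
 */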
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(QUEUED),
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BYPASS),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PREEMPT_ONLY),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);
	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;
	return count;
}

static int queue_zone_wlock_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	unsigned int i;

	if (!q->seq_zones_wlock)
		return 0;

	for (i = 0; i < blk_queue_nr_zones(q); i++)
		if (test_bit(i, q->seq_zones_wlock))
			seq_printf(m, "%u\n", i);
	return 0;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(ZONE_REPORT),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(QUEUED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_TIMEOUT_EXPIRED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	/* >= rather than >: an index equal to the array size is out of bounds */
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
	    blk_mq_rq_state(rq) != MQ_RQ_IDLE)
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_lock(&ctx->lock);
	return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_ctx *ctx = m->private;

	return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
	__releases(&ctx->lock)
{
	struct blk_mq_ctx *ctx = m->private;

	spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
	.start	= ctx_rq_list_start,
	.next	= ctx_rq_list_next,
	.stop	= ctx_rq_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};
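
/*
 * The blk-mq object (queue, hctx or ctx) is stashed in the i_private field of
 * the parent directory's inode, while each file's inode carries its attr;
 * blk_mq_debugfs_open(), _show() and _write() above recover both through
 * file->f_path.dentry->d_parent.
 */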
static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}

int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}