/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
/* Select an alternative, minimalistic output format rather than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Default: disable the minimalistic (classic) output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};
static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(buffer, event, 0, pc);
	}
}
/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started.
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm));
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);
/*
 * Decide whether an event should be filtered out: returns 1 when the
 * event fails the action mask, LBA range, or pid filter, 0 when it
 * should be logged.
 */
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
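
/*
 * Illustrative expansion of MASK_TC_BIT (the real bit positions come
 * from __REQ_* in blk_types.h and BLK_TC_* in blktrace_api.h): for
 * __name = SYNC,
 *
 *	MASK_TC_BIT(rw, SYNC)
 *	  == (rw & REQ_SYNC) << (ilog2(BLK_TC_SYNC) + BLK_TC_SHIFT - __REQ_SYNC)
 *
 * i.e. the REQ_SYNC bit is masked out of rw and shifted so it lands
 * exactly on the BLK_TC_SYNC bit in the upper (BLK_TC_SHIFT) half of
 * the action word. Every operand of the shift is a compile-time
 * constant, so the whole expression folds to a single mask-and-shift.
 */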
/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, SYNC);
	what |= MASK_TC_BIT(rw, RAHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);
	what |= MASK_TC_BIT(rw, FLUSH);
	what |= MASK_TC_BIT(rw, FUA);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}
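
/*
 * A sketch of the resulting record layout (struct blk_io_trace in
 * blktrace_api.h is authoritative): each event is the fixed-size
 * header immediately followed by pdu_len bytes of payload, so a
 * consumer can walk a subbuffer with
 *
 *	t = (struct blk_io_trace *)p;
 *	pdu = (void *)(t + 1);			// pdu_len bytes
 *	p += sizeof(*t) + t->pdu_len;		// next record
 *
 * which mirrors the sizeof(*t) + pdu_len reservation made above.
 */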
static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.read =		blk_dropped_read,
	.llseek =	default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		simple_open,
	.write =	blk_msg_write,
	.llseek =	noop_llseek,
};
/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};
static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}
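
/*
 * Example (illustrative numbers): tracing a partition that starts at
 * sector 2048 and spans 1048576 sectors limits logging to sectors
 * between 2048 and 1050624; tracing a whole disk (no hd_struct) leaves
 * the range wide open at [0, -1ULL].
 */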
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}
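
/*
 * The resulting debugfs layout for, say, /dev/sda looks roughly like
 * (paths illustrative; the mount point is system-dependent):
 *
 *	/sys/kernel/debug/block/sda/
 *		dropped			read: count of dropped events
 *		msg			write: inject a free-form message
 *		trace0 .. traceN	one relay file per CPU
 *
 * The userspace blktrace utility typically fills blk_user_trace_setup
 * with buffer defaults on the order of 512 KiB x 4 subbuffers, but any
 * nonzero buf_size/buf_nr values are accepted here.
 */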
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif
int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running.
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
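
/*
 * A sketch of the lifecycle driven by the setup/startstop/remove calls:
 *
 *	(none)           --BLKTRACESETUP-->     Blktrace_setup
 *	Blktrace_setup   --start-->             Blktrace_running
 *	Blktrace_stopped --start-->             Blktrace_running
 *	Blktrace_running --stop-->              Blktrace_stopped
 *	Blktrace_setup/stopped --BLKTRACETEARDOWN--> (freed)
 *
 * Starting bumps blktrace_seq, so each task's first event after a
 * (re)start emits a fresh BLK_TN_PROCESS note via trace_note_tsk().
 */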
/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
		/* fall through */
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
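
/*
 * A minimal userspace sketch of driving these ioctls (illustrative,
 * error handling elided; this mirrors what the blktrace utility does):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);	// creates the debugfs files
 *	ioctl(fd, BLKTRACESTART);		// state -> Blktrace_running
 *	// ... read /sys/kernel/debug/block/sda/trace<cpu> ...
 *	ioctl(fd, BLKTRACESTOP);
 *	ioctl(fd, BLKTRACETEARDOWN);
 */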
/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:   the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}
/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     unsigned int nr_bytes, u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
				what, rq->errors, rq->cmd_len, rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
				rq->cmd_flags, what, rq->errors, 0, NULL);
	}
}
static void blk_add_trace_rq_abort(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(void *ignore,
				      struct request_queue *q,
				      struct request *rq,
				      unsigned int nr_bytes)
{
	blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
}
/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (!error && !bio_flagged(bio, BIO_UPTODATE))
		error = EIO;

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_rw, what, error, 0, NULL);
}
static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct request *rq,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct request *rq,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(void *ignore,
				struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_iter.bi_sector,
				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
				!bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}
/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid targets sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio->bi_rw, BLK_TA_REMAP,
			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps requests to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
			sizeof(r), &r);
}
/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
	else
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}
static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort, NULL);

	tracepoint_synchronize_unregister();
}
/*
 * struct blk_io_tracer formatting routines
 */
static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
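
/*
 * Worked examples of the flag strings produced above: a sync write
 * yields "WS", a readahead read yields "RA", a discard yields "D",
 * and a flush + FUA write yields "FWF". A zero-byte event with
 * neither the write nor the discard bit set falls through to "N".
 */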
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}
typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}
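
/*
 * With the classic format, the prefix printed above looks like this
 * (an illustrative line, values invented):
 *
 *	  8,0    1     0.000102341  4123  Q  WS
 *
 * i.e. major,minor, CPU, seconds.nanoseconds, pid, action, flags; the
 * per-action body (e.g. "sector + size [comm]") is appended by the
 * blk_log_* helpers below.
 */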
static void blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), act, rwbs);
}
static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent);
	pdu_len = te_blk_io_trace(ent)->pdu_len;

	if (!pdu_len)
		return;

	/* find the last non-zero byte; trailing zeroes are elided below */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}
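
/*
 * Example (illustrative): a ten-byte READ CAPACITY(10) CDB,
 * 25 00 00 00 00 00 00 00 00 00, is dumped as "(25 00 ..) " - the run
 * of trailing zeroes is collapsed, with one zero kept to show where
 * the elision starts.
 */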
static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r);
	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(r.device_from), MINOR(r.device_from),
			 (unsigned long long)r.sector_from);
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	const struct blk_io_trace *t = te_blk_io_trace(ent);

	trace_seq_putmem(s, t + 1, t->pdu_len);
	trace_seq_putc(s, '\n');
}
/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};
static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m");
		blk_log_msg(s, iter->ent);
		/*
		 * Done; without this return the message would also be
		 * decoded as a bogus action below.
		 */
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act]);
		what2act[what].print(s, iter->ent);
	}

	return trace_handle_return(s);
}
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.funcs		= &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);
static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}
/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
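
/*
 * These attributes surface under each block device's sysfs directory,
 * e.g. (path illustrative)
 * /sys/block/sda/sda1/trace/{enable,act_mask,pid,start_lba,end_lba}.
 * A quick shell session (sketch):
 *
 *	# echo 1 > /sys/block/sda/sda1/trace/enable
 *	# echo read,write > /sys/block/sda/sda1/trace/act_mask
 *	# cat /sys/block/sda/sda1/trace/act_mask
 *	read,write
 */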
static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_FLUSH,		"flush"		},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
	{ BLK_TC_FUA,		"fua"		},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
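
/*
 * Examples (following the parsing loop above):
 *
 *	blk_trace_str2mask("read,write") == BLK_TC_READ | BLK_TC_WRITE
 *	blk_trace_str2mask("READ")       == BLK_TC_READ (matching is
 *					    case-insensitive)
 *	blk_trace_str2mask("bogus")      == -EINVAL
 *
 * Note that only the ends of the whole string are stripped, so spaces
 * around the commas ("read, write") make a token unmatchable.
 */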
static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}
int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */
#ifdef CONFIG_EVENT_TRACING

void blk_dump_cmd(char *buf, struct request *rq)
{
	int i, end;
	int len = rq->cmd_len;
	unsigned char *cmd = rq->cmd;

	if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
		buf[0] = '\0';
		return;
	}

	/* find the last non-zero byte; trailing zeroes are elided below */
	for (end = len - 1; end >= 0; end--)
		if (cmd[end])
			break;
	end++;

	for (i = 0; i < len; i++) {
		buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
		if (i == end && end != len - 1) {
			sprintf(buf, " ..");
			break;
		}
	}
}
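
/*
 * As with blk_log_dump_pdu() above, trailing zeroes in the command are
 * collapsed: an illustrative READ CAPACITY(10) CDB of
 * 25 00 00 00 00 00 00 00 00 00 dumps as "25 00 ..".
 */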
void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
	int i = 0;

	if (rw & REQ_FLUSH)
		rwbs[i++] = 'F';

	if (rw & WRITE)
		rwbs[i++] = 'W';
	else if (rw & REQ_DISCARD)
		rwbs[i++] = 'D';
	else if (bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (rw & REQ_FUA)
		rwbs[i++] = 'F';
	if (rw & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (rw & REQ_SYNC)
		rwbs[i++] = 'S';
	if (rw & REQ_META)
		rwbs[i++] = 'M';
	if (rw & REQ_SECURE)
		rwbs[i++] = 'E';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);
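
/*
 * Unlike fill_rwbs() above, which decodes BLK_TC_* category bits from
 * a recorded action, this variant decodes live REQ_* request flags:
 * e.g. (WRITE | REQ_SYNC) yields "WS" and REQ_DISCARD yields "D".
 * Note the ordering difference - here WRITE is tested before DISCARD.
 */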
#endif /* CONFIG_EVENT_TRACING */