/*
 * linux/fs/seq_file.c
 *
 * helper functions for making synthetic files from sequences of records.
 * initial implementation -- AV, Oct 2001.
 */

#include <linux/fs.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/mm.h>

#include <asm/uaccess.h>
#include <asm/page.h>

static void seq_set_overflow(struct seq_file *m)
{
	m->count = m->size;
}

static void *seq_buf_alloc(unsigned long size)
{
	void *buf;

	/*
	 * __GFP_NORETRY to avoid oom-killings with high-order allocations -
	 * it's better to fall back to vmalloc() than to kill things.
	 */
	buf = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	if (!buf && size > PAGE_SIZE)
		buf = vmalloc(size);
	return buf;
}

/**
 * seq_open - initialize sequential file
 * @file: file we initialize
 * @op: method table describing the sequence
 *
 * seq_open() sets @file, associating it with a sequence described
 * by @op. @op->start() sets the iterator up and returns the first
 * element of the sequence. @op->stop() shuts it down. @op->next()
 * returns the next element of the sequence. @op->show() prints the
 * element into the buffer. In case of error ->start() and ->next()
 * return ERR_PTR(error). At the end of the sequence they return %NULL.
 * ->show() returns 0 in case of success and a negative number in case
 * of error. Returning SEQ_SKIP means "discard this element and move on".
 */
int seq_open(struct file *file, const struct seq_operations *op)
{
	struct seq_file *p = file->private_data;

	if (!p) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
		file->private_data = p;
	}
	memset(p, 0, sizeof(*p));
	mutex_init(&p->lock);
	p->op = op;
#ifdef CONFIG_USER_NS
	p->user_ns = file->f_cred->user_ns;
#endif

	/*
	 * Wrappers around seq_open() (e.g. swaps_open()) need to be
	 * aware of this. If they set f_version themselves, they
	 * should call seq_open() first and then set f_version.
	 */
	file->f_version = 0;

	/*
	 * seq_files support lseek() and pread(). They do not implement
	 * write() at all, but we clear FMODE_PWRITE here for historical
	 * reasons.
	 *
	 * If a client of seq_files a) implements file.write() and b) wishes to
	 * support pwrite() then that client will need to implement its own
	 * file.open() which calls seq_open() and then sets FMODE_PWRITE.
	 */
	file->f_mode &= ~FMODE_PWRITE;
	return 0;
}
EXPORT_SYMBOL(seq_open);
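
/*
 * Illustrative sketch (not part of seq_file.c): a minimal provider built on
 * seq_open().  All names below (example_items, example_seq_*, example_open)
 * are hypothetical; they only show how the four iterator methods described
 * in the comment above plug together.
 */
static const char * const example_items[] = { "alpha", "beta", "gamma" };

static void *example_seq_start(struct seq_file *m, loff_t *pos)
{
	/* Return an iterator for position *pos, or NULL once past the end. */
	if (*pos >= ARRAY_SIZE(example_items))
		return NULL;
	return (void *)&example_items[*pos];
}

static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return example_seq_start(m, pos);
}

static void example_seq_stop(struct seq_file *m, void *v)
{
	/* Nothing to unlock or free in this sketch. */
}

static int example_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char * const *)v);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start	= example_seq_start,
	.next	= example_seq_next,
	.stop	= example_seq_stop,
	.show	= example_seq_show,
};

static int example_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &example_seq_ops);
}
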
static int traverse(struct seq_file *m, loff_t offset)
{
	loff_t pos = 0, index;
	int error = 0;
	void *p;

	m->version = 0;
	index = 0;
	m->count = m->from = 0;
	if (!offset) {
		m->index = index;
		return 0;
	}
	if (!m->buf) {
		m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
		if (!m->buf)
			return -ENOMEM;
	}
	p = m->op->start(m, &index);
	while (p) {
		error = PTR_ERR(p);
		if (IS_ERR(p))
			break;
		error = m->op->show(m, p);
		if (error < 0)
			break;
		if (unlikely(error)) {
			error = 0;
			m->count = 0;
		}
		if (seq_has_overflowed(m))
			goto Eoverflow;
		if (pos + m->count > offset) {
			m->from = offset - pos;
			m->count -= m->from;
			m->index = index;
			break;
		}
		pos += m->count;
		m->count = 0;
		if (pos == offset) {
			index++;
			m->index = index;
			break;
		}
		p = m->op->next(m, p, &index);
	}
	m->op->stop(m, p);
	m->index = index;
	return error;

Eoverflow:
	m->op->stop(m, p);
	kvfree(m->buf);
	m->count = 0;
	m->buf = seq_buf_alloc(m->size <<= 1);
	return !m->buf ? -ENOMEM : -EAGAIN;
}

/**
 * seq_read - ->read() method for sequential files.
 * @file: the file to read from
 * @buf: the buffer to read to
 * @size: the maximum number of bytes to read
 * @ppos: the current position in the file
 *
 * Ready-made ->f_op->read()
 */
ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	size_t copied = 0;
	loff_t pos;
	size_t n;
	void *p;
	int err = 0;

	mutex_lock(&m->lock);

	/*
	 * seq_file->op->..m_start/m_stop/m_next may do special actions
	 * or optimisations based on the file->f_version, so we want to
	 * pass the file->f_version to those methods.
	 *
	 * seq_file->version is just a copy of f_version, and the seq_file
	 * methods can treat it simply as the file version.
	 * It is copied in first and copied out after all operations.
	 * It is convenient to have it as part of the structure to avoid
	 * the need to pass another argument to all the seq_file methods.
	 */
	m->version = file->f_version;

	/* Don't assume *ppos is where we left it */
	if (unlikely(*ppos != m->read_pos)) {
		while ((err = traverse(m, *ppos)) == -EAGAIN)
			;
		if (err) {
			/* With prejudice... */
			m->read_pos = 0;
			m->version = 0;
			m->index = 0;
			m->count = 0;
			goto Done;
		} else {
			m->read_pos = *ppos;
		}
	}

	/* grab buffer if we didn't have one */
	if (!m->buf) {
		m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
		if (!m->buf)
			goto Enomem;
	}
	/* if not empty - flush it first */
	if (m->count) {
		n = min(m->count, size);
		err = copy_to_user(buf, m->buf + m->from, n);
		if (err)
			goto Efault;
		m->count -= n;
		m->from += n;
		size -= n;
		buf += n;
		copied += n;
		if (!m->count)
			m->index++;
		if (!size)
			goto Done;
	}
	/* we need at least one record in buffer */
	pos = m->index;
	p = m->op->start(m, &pos);
	while (1) {
		err = PTR_ERR(p);
		if (!p || IS_ERR(p))
			break;
		err = m->op->show(m, p);
		if (err < 0)
			break;
		if (unlikely(err))
			m->count = 0;
		if (unlikely(!m->count)) {
			p = m->op->next(m, p, &pos);
			m->index = pos;
			continue;
		}
		if (m->count < m->size)
			goto Fill;
		m->op->stop(m, p);
		kvfree(m->buf);
		m->count = 0;
		m->buf = seq_buf_alloc(m->size <<= 1);
		if (!m->buf)
			goto Enomem;
		m->version = 0;
		pos = m->index;
		p = m->op->start(m, &pos);
	}
	m->op->stop(m, p);
	m->count = 0;
	goto Done;
Fill:
	/* they want more? let's try to get some more */
	while (m->count < size) {
		size_t offs = m->count;
		loff_t next = pos;
		p = m->op->next(m, p, &next);
		if (!p || IS_ERR(p)) {
			err = PTR_ERR(p);
			break;
		}
		err = m->op->show(m, p);
		if (seq_has_overflowed(m) || err) {
			m->count = offs;
			if (likely(err <= 0))
				break;
		}
		pos = next;
	}
	m->op->stop(m, p);
	n = min(m->count, size);
	err = copy_to_user(buf, m->buf, n);
	if (err)
		goto Efault;
	copied += n;
	m->count -= n;
	if (m->count)
		m->from = n;
	else
		pos++;
	m->index = pos;
Done:
	if (!copied)
		copied = err;
	else {
		*ppos += copied;
		m->read_pos += copied;
	}
	file->f_version = m->version;
	mutex_unlock(&m->lock);
	return copied;
Enomem:
	err = -ENOMEM;
	goto Done;
Efault:
	err = -EFAULT;
	goto Done;
}
EXPORT_SYMBOL(seq_read);

/**
 * seq_lseek - ->llseek() method for sequential files.
 * @file: the file in question
 * @offset: new position
 * @whence: 0 for absolute, 1 for relative position
 *
 * Ready-made ->f_op->llseek()
 */
loff_t seq_lseek(struct file *file, loff_t offset, int whence)
{
	struct seq_file *m = file->private_data;
	loff_t retval = -EINVAL;

	mutex_lock(&m->lock);
	m->version = file->f_version;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		if (offset < 0)
			break;
		retval = offset;
		if (offset != m->read_pos) {
			while ((retval = traverse(m, offset)) == -EAGAIN)
				;
			if (retval) {
				/* with extreme prejudice... */
				file->f_pos = 0;
				m->read_pos = 0;
				m->version = 0;
				m->index = 0;
				m->count = 0;
			} else {
				m->read_pos = offset;
				retval = file->f_pos = offset;
			}
		} else {
			file->f_pos = offset;
		}
	}
	file->f_version = m->version;
	mutex_unlock(&m->lock);
	return retval;
}
EXPORT_SYMBOL(seq_lseek);

/**
 * seq_release - free the structures associated with sequential file.
 * @file: file in question
 * @inode: its inode
 *
 * Frees the structures associated with sequential file; can be used
 * as ->f_op->release() if you don't have private data to destroy.
 */
int seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	kvfree(m->buf);
	kfree(m);
	return 0;
}
EXPORT_SYMBOL(seq_release);
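
/*
 * Illustrative sketch (not part of seq_file.c): wiring the ready-made
 * methods above into a file_operations table.  example_fops is hypothetical,
 * and example_open is assumed to call seq_open() as in the earlier sketch;
 * a caller with per-open private data would use seq_release_private()
 * instead of seq_release().
 */
static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
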
/**
 * seq_escape - print string into buffer, escaping some characters
 * @m: target buffer
 * @s: string
 * @esc: set of characters that need escaping
 *
 * Puts string into buffer, replacing each occurrence of a character from
 * @esc with the usual octal escape.  Returns 0 in case of success, -1 in
 * case of overflow.
 */
int seq_escape(struct seq_file *m, const char *s, const char *esc)
{
	char *end = m->buf + m->size;
	char *p;
	char c;

	for (p = m->buf + m->count; (c = *s) != '\0' && p < end; s++) {
		if (!strchr(esc, c)) {
			*p++ = c;
			continue;
		}
		if (p + 3 < end) {
			*p++ = '\\';
			*p++ = '0' + ((c & 0300) >> 6);
			*p++ = '0' + ((c & 070) >> 3);
			*p++ = '0' + (c & 07);
			continue;
		}
		seq_set_overflow(m);
		return -1;
	}
	m->count = p - m->buf;
	return 0;
}
EXPORT_SYMBOL(seq_escape);

int seq_vprintf(struct seq_file *m, const char *f, va_list args)
{
	int len;

	if (m->count < m->size) {
		len = vsnprintf(m->buf + m->count, m->size - m->count, f, args);
		if (m->count + len < m->size) {
			m->count += len;
			return 0;
		}
	}
	seq_set_overflow(m);
	return -1;
}
EXPORT_SYMBOL(seq_vprintf);

int seq_printf(struct seq_file *m, const char *f, ...)
{
	int ret;
	va_list args;

	va_start(args, f);
	ret = seq_vprintf(m, f, args);
	va_end(args);

	return ret;
}
EXPORT_SYMBOL(seq_printf);
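
/*
 * Illustrative sketch (not part of seq_file.c): a ->show() method mixing
 * seq_puts(), seq_printf() and seq_putc().  The struct example_stat and its
 * fields are hypothetical.  On overflow these helpers mark the buffer full,
 * and seq_read()/traverse() simply retry the record with a larger buffer,
 * so a ->show() method normally ignores their return values and returns 0.
 */
struct example_stat {
	const char *name;
	unsigned long hits;
};

static int example_stat_show(struct seq_file *m, void *v)
{
	struct example_stat *st = v;

	seq_puts(m, "name\thits\n");
	seq_printf(m, "%s\t%lu", st->name, st->hits);
	seq_putc(m, '\n');
	return 0;
}
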
/**
 * mangle_path - mangle and copy path to buffer beginning
 * @s: buffer start
 * @p: beginning of path in above buffer
 * @esc: set of characters that need escaping
 *
 * Copy the path from @p to @s, replacing each occurrence of character from
 * @esc with usual octal escape.
 * Returns pointer past last written character in @s, or NULL in case of
 * failure.
 */
char *mangle_path(char *s, const char *p, const char *esc)
{
	while (s <= p) {
		char c = *p++;
		if (!c) {
			return s;
		} else if (!strchr(esc, c)) {
			*s++ = c;
		} else if (s + 4 > p) {
			break;
		} else {
			*s++ = '\\';
			*s++ = '0' + ((c & 0300) >> 6);
			*s++ = '0' + ((c & 070) >> 3);
			*s++ = '0' + (c & 07);
		}
	}
	return NULL;
}
EXPORT_SYMBOL(mangle_path);

/**
 * seq_path - seq_file interface to print a pathname
 * @m: the seq_file handle
 * @path: the struct path to print
 * @esc: set of characters to escape in the output
 *
 * return the absolute path of 'path', as represented by the
 * dentry / mnt pair in the path parameter.
 */
int seq_path(struct seq_file *m, const struct path *path, const char *esc)
{
	char *buf;
	size_t size = seq_get_buf(m, &buf);
	int res = -1;

	if (size) {
		char *p = d_path(path, buf, size);
		if (!IS_ERR(p)) {
			char *end = mangle_path(buf, p, esc);
			if (end)
				res = end - buf;
		}
	}
	seq_commit(m, res);

	return res;
}
EXPORT_SYMBOL(seq_path);

/*
 * Same as seq_path, but relative to supplied root.
 */
int seq_path_root(struct seq_file *m, const struct path *path,
		  const struct path *root, const char *esc)
{
	char *buf;
	size_t size = seq_get_buf(m, &buf);
	int res = -ENAMETOOLONG;

	if (size) {
		char *p;

		p = __d_path(path, root, buf, size);
		if (!p)
			return SEQ_SKIP;
		res = PTR_ERR(p);
		if (!IS_ERR(p)) {
			char *end = mangle_path(buf, p, esc);
			if (end)
				res = end - buf;
			else
				res = -ENAMETOOLONG;
		}
	}
	seq_commit(m, res);

	return res < 0 && res != -ENAMETOOLONG ? res : 0;
}

/*
 * returns the path of the 'dentry' from the root of its filesystem.
 */
int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc)
{
	char *buf;
	size_t size = seq_get_buf(m, &buf);
	int res = -1;

	if (size) {
		char *p = dentry_path(dentry, buf, size);
		if (!IS_ERR(p)) {
			char *end = mangle_path(buf, p, esc);
			if (end)
				res = end - buf;
		}
	}
	seq_commit(m, res);

	return res;
}

static void *single_start(struct seq_file *p, loff_t *pos)
{
	return NULL + (*pos == 0);
}

static void *single_next(struct seq_file *p, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void single_stop(struct seq_file *p, void *v)
{
}

int single_open(struct file *file, int (*show)(struct seq_file *, void *),
		void *data)
{
	struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
	int res = -ENOMEM;

	if (op) {
		op->start = single_start;
		op->next = single_next;
		op->stop = single_stop;
		op->show = show;
		res = seq_open(file, op);
		if (!res)
			((struct seq_file *)file->private_data)->private = data;
		else
			kfree(op);
	}
	return res;
}
EXPORT_SYMBOL(single_open);
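
/*
 * Illustrative sketch (not part of seq_file.c): using single_open() for a
 * file whose whole contents come from one ->show() call.  example_single_*
 * are hypothetical names.  The matching ->release() must be
 * single_release() so the seq_operations allocated by single_open() are
 * freed.
 */
static int example_single_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from a single-record seq_file\n");
	return 0;
}

static int example_single_open(struct inode *inode, struct file *file)
{
	/* inode->i_private (or any cookie) can be passed through as data. */
	return single_open(file, example_single_show, inode->i_private);
}
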
int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
		void *data, size_t size)
{
	char *buf = seq_buf_alloc(size);
	int ret;
	if (!buf)
		return -ENOMEM;
	ret = single_open(file, show, data);
	if (ret) {
		kvfree(buf);
		return ret;
	}
	((struct seq_file *)file->private_data)->buf = buf;
	((struct seq_file *)file->private_data)->size = size;
	return 0;
}
EXPORT_SYMBOL(single_open_size);

int single_release(struct inode *inode, struct file *file)
{
	const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
	int res = seq_release(inode, file);
	kfree(op);
	return res;
}
EXPORT_SYMBOL(single_release);

int seq_release_private(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	kfree(seq->private);
	seq->private = NULL;
	return seq_release(inode, file);
}
EXPORT_SYMBOL(seq_release_private);

void *__seq_open_private(struct file *f, const struct seq_operations *ops,
		int psize)
{
	int rc;
	void *private;
	struct seq_file *seq;

	private = kzalloc(psize, GFP_KERNEL);
	if (private == NULL)
		goto out;

	rc = seq_open(f, ops);
	if (rc < 0)
		goto out_free;

	seq = f->private_data;
	seq->private = private;
	return private;

out_free:
	kfree(private);
out:
	return NULL;
}
EXPORT_SYMBOL(__seq_open_private);

int seq_open_private(struct file *filp, const struct seq_operations *ops,
		int psize)
{
	return __seq_open_private(filp, ops, psize) ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(seq_open_private);
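
/*
 * Illustrative sketch (not part of seq_file.c): seq_open_private() allocates
 * a zeroed per-open structure and hangs it off seq_file->private.  The
 * struct example_iter and example_priv_open are hypothetical, and
 * example_seq_ops reuses the hypothetical ops table from the sketch after
 * seq_open().
 */
struct example_iter {
	int bucket;		/* per-open cursor state */
};

static int example_priv_open(struct inode *inode, struct file *file)
{
	/* Pair with seq_release_private() in ->release(). */
	return seq_open_private(file, &example_seq_ops,
				sizeof(struct example_iter));
}
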
int seq_putc(struct seq_file *m, char c)
{
	if (m->count < m->size) {
		m->buf[m->count++] = c;
		return 0;
	}
	return -1;
}
EXPORT_SYMBOL(seq_putc);

int seq_puts(struct seq_file *m, const char *s)
{
	int len = strlen(s);

	if (m->count + len < m->size) {
		memcpy(m->buf + m->count, s, len);
		m->count += len;
		return 0;
	}
	seq_set_overflow(m);
	return -1;
}
EXPORT_SYMBOL(seq_puts);

/*
 * A helper routine for putting decimal numbers without the rich formatting
 * of printf().  Only 'unsigned long long' is supported.
 * This routine puts one delimiter byte + the number into the seq_file.
 * It is very quick when you show lots of numbers.
 * In usual cases it is better to use seq_printf(), which is easier to read.
 */
int seq_put_decimal_ull(struct seq_file *m, char delimiter,
			unsigned long long num)
{
	int len;

	if (m->count + 2 >= m->size) /* we'll write 2 bytes at least */
		goto overflow;

	if (delimiter)
		m->buf[m->count++] = delimiter;

	if (num < 10) {
		m->buf[m->count++] = num + '0';
		return 0;
	}

	len = num_to_str(m->buf + m->count, m->size - m->count, num);
	if (!len)
		goto overflow;
	m->count += len;
	return 0;

overflow:
	seq_set_overflow(m);
	return -1;
}
EXPORT_SYMBOL(seq_put_decimal_ull);

int seq_put_decimal_ll(struct seq_file *m, char delimiter,
			long long num)
{
	if (num < 0) {
		if (m->count + 3 >= m->size) {
			seq_set_overflow(m);
			return -1;
		}
		if (delimiter)
			m->buf[m->count++] = delimiter;
		num = -num;
		delimiter = '-';
	}
	return seq_put_decimal_ull(m, delimiter, num);
}
EXPORT_SYMBOL(seq_put_decimal_ll);

/**
 * seq_write - write arbitrary data to buffer
 * @seq: seq_file identifying the buffer to which data should be written
 * @data: data address
 * @len: number of bytes
 *
 * Return 0 on success, non-zero otherwise.
 */
int seq_write(struct seq_file *seq, const void *data, size_t len)
{
	if (seq->count + len < seq->size) {
		memcpy(seq->buf + seq->count, data, len);
		seq->count += len;
		return 0;
	}
	seq_set_overflow(seq);
	return -1;
}
EXPORT_SYMBOL(seq_write);

/**
 * seq_pad - write padding spaces to buffer
 * @m: seq_file identifying the buffer to which data should be written
 * @c: the byte to append after padding if non-zero
 */
void seq_pad(struct seq_file *m, char c)
{
	int size = m->pad_until - m->count;
	if (size > 0)
		seq_printf(m, "%*s", size, "");
	if (c)
		seq_putc(m, c);
}
EXPORT_SYMBOL(seq_pad);

struct list_head *seq_list_start(struct list_head *head, loff_t pos)
{
	struct list_head *lh;

	list_for_each(lh, head)
		if (pos-- == 0)
			return lh;

	return NULL;
}
EXPORT_SYMBOL(seq_list_start);

struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
{
	if (!pos)
		return head;

	return seq_list_start(head, pos - 1);
}
EXPORT_SYMBOL(seq_list_start_head);

struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
{
	struct list_head *lh;

	lh = ((struct list_head *)v)->next;
	++*ppos;
	return lh == head ? NULL : lh;
}
EXPORT_SYMBOL(seq_list_next);
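
/*
 * Illustrative sketch (not part of seq_file.c): iterator methods written in
 * terms of seq_list_start()/seq_list_next().  example_list and example_lock
 * are hypothetical; the mutex stands in for whatever protects the list
 * across the ->start()/->stop() window.
 */
static LIST_HEAD(example_list);
static DEFINE_MUTEX(example_lock);

static void *example_list_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&example_lock);
	return seq_list_start(&example_list, *pos);
}

static void *example_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &example_list, pos);
}

static void example_list_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&example_lock);
}
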
/**
 * seq_hlist_start - start an iteration of a hlist
 * @head: the head of the hlist
 * @pos:  the start position of the sequence
 *
 * Called at seq_file->op->start().
 */
struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos)
{
	struct hlist_node *node;

	hlist_for_each(node, head)
		if (pos-- == 0)
			return node;
	return NULL;
}
EXPORT_SYMBOL(seq_hlist_start);

/**
 * seq_hlist_start_head - start an iteration of a hlist
 * @head: the head of the hlist
 * @pos:  the start position of the sequence
 *
 * Called at seq_file->op->start(). Call this function if you want to
 * print a header at the top of the output.
 */
struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos)
{
	if (!pos)
		return SEQ_START_TOKEN;

	return seq_hlist_start(head, pos - 1);
}
EXPORT_SYMBOL(seq_hlist_start_head);

/**
 * seq_hlist_next - move to the next position of the hlist
 * @v:    the current iterator
 * @head: the head of the hlist
 * @ppos: the current position
 *
 * Called at seq_file->op->next().
 */
struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
				  loff_t *ppos)
{
	struct hlist_node *node = v;

	++*ppos;
	if (v == SEQ_START_TOKEN)
		return head->first;
	else
		return node->next;
}
EXPORT_SYMBOL(seq_hlist_next);

/**
 * seq_hlist_start_rcu - start an iteration of a hlist protected by RCU
 * @head: the head of the hlist
 * @pos:  the start position of the sequence
 *
 * Called at seq_file->op->start().
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
				       loff_t pos)
{
	struct hlist_node *node;

	__hlist_for_each_rcu(node, head)
		if (pos-- == 0)
			return node;
	return NULL;
}
EXPORT_SYMBOL(seq_hlist_start_rcu);

/**
 * seq_hlist_start_head_rcu - start an iteration of a hlist protected by RCU
 * @head: the head of the hlist
 * @pos:  the start position of the sequence
 *
 * Called at seq_file->op->start(). Call this function if you want to
 * print a header at the top of the output.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
					    loff_t pos)
{
	if (!pos)
		return SEQ_START_TOKEN;

	return seq_hlist_start_rcu(head, pos - 1);
}
EXPORT_SYMBOL(seq_hlist_start_head_rcu);

/**
 * seq_hlist_next_rcu - move to the next position of the hlist protected by RCU
 * @v:    the current iterator
 * @head: the head of the hlist
 * @ppos: the current position
 *
 * Called at seq_file->op->next().
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
struct hlist_node *seq_hlist_next_rcu(void *v,
				      struct hlist_head *head,
				      loff_t *ppos)
{
	struct hlist_node *node = v;

	++*ppos;
	if (v == SEQ_START_TOKEN)
		return rcu_dereference(head->first);
	else
		return rcu_dereference(node->next);
}
EXPORT_SYMBOL(seq_hlist_next_rcu);
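
/*
 * Illustrative sketch (not part of seq_file.c): the RCU variants above are
 * meant to be bracketed by rcu_read_lock()/rcu_read_unlock() in the
 * provider's ->start() and ->stop() methods.  example_hlist and the
 * example_hlist_* names are hypothetical.
 */
static HLIST_HEAD(example_hlist);

static void *example_hlist_start(struct seq_file *m, loff_t *pos)
{
	rcu_read_lock();
	return seq_hlist_start_rcu(&example_hlist, *pos);
}

static void *example_hlist_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_hlist_next_rcu(v, &example_hlist, pos);
}

static void example_hlist_stop(struct seq_file *m, void *v)
{
	rcu_read_unlock();
}
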
/**
 * seq_hlist_start_percpu - start an iteration of a percpu hlist array
 * @head: pointer to percpu array of struct hlist_heads
 * @cpu:  pointer to cpu "cursor"
 * @pos:  start position of sequence
 *
 * Called at seq_file->op->start().
 */
struct hlist_node *
seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos)
{
	struct hlist_node *node;

	for_each_possible_cpu(*cpu) {
		hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
			if (pos-- == 0)
				return node;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(seq_hlist_start_percpu);

/**
 * seq_hlist_next_percpu - move to the next position of the percpu hlist array
 * @v:    pointer to current hlist_node
 * @head: pointer to percpu array of struct hlist_heads
 * @cpu:  pointer to cpu "cursor"
 * @pos:  start position of sequence
 *
 * Called at seq_file->op->next().
 */
struct hlist_node *
seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head,
			int *cpu, loff_t *pos)
{
	struct hlist_node *node = v;

	++*pos;

	if (node->next)
		return node->next;

	for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids;
	     *cpu = cpumask_next(*cpu, cpu_possible_mask)) {
		struct hlist_head *bucket = per_cpu_ptr(head, *cpu);

		if (!hlist_empty(bucket))
			return bucket->first;
	}
	return NULL;
}
EXPORT_SYMBOL(seq_hlist_next_percpu);