/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/fsnotify.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
	atomic_t		refcnt;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item.  To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for NULL.
 */
#define KERNFS_NOTIFY_EOL		((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
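
/*
 * Return the kernfs_open_file backing @file.  kernfs_fop_open() always
 * stores it in the seq_file's ->private, whether or not the file uses
 * seq_file for reads, so this works for both read paths.
 */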
static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure.  The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it.  As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated due to the optional custom seq_file
 * operations which may return ERR_PTR(-ENODEV) too.  kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
 * custom seq_file operations and thus can't decide whether put_active
 * should be performed or not only on ERR_PTR(-ENODEV).
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * that kernfs_seq_stop_active() is skipped only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	if (v != ERR_PTR(-ENODEV))
		kernfs_seq_stop_active(sf, v);
	mutex_unlock(&of->mutex);
}
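
/*
 * Record the open_node's event counter before showing so that
 * kernfs_fop_poll() can later tell whether the content has changed.
 */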
static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};

/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file.  Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	if (copy_to_user(user_buf, buf, len)) {
		len = -EFAULT;
		goto out_free;
	}

	*ppos += len;

out_free:
	kfree(buf);
	return len;
}

/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: buffer to copy the read data into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);

	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	else
		return kernfs_file_direct_read(of, user_buf, count, ppos);
}

/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them.  We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	ssize_t len;	/* must be signed: holds -errno and the write result */
	char *buf;

	if (of->atomic_write_len) {
		len = count;
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, count, PAGE_SIZE);
	}

	buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, user_buf, len)) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		*ppos += len;

out_free:
	kfree(buf);
	return len;
}
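
/*
 * The vm operations below wrap those installed by the kernfs_ops ->mmap()
 * implementation.  Each wrapper bails out if the file was never
 * successfully mmapped (no of->vm_ops) or if an active reference can't be
 * acquired, and otherwise forwards to the wrapped callback if it exists.
 */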
static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!kernfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vma, vmf);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vma, vmf);
	else
		file_update_time(file);

	kernfs_put_active(of->kn);
	return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);
	return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	kernfs_put_active(of->kn);
	return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!kernfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	kernfs_put_active(of->kn);
	return pol;
}

static int kernfs_vma_migrate(struct vm_area_struct *vma,
			      const nodemask_t *from, const nodemask_t *to,
			      unsigned long flags)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return 0;

	ret = 0;
	if (of->vm_ops->migrate)
		ret = of->vm_ops->migrate(vma, from, to, flags);

	kernfs_put_active(of->kn);
	return ret;
}
#endif

static const struct vm_operations_struct kernfs_vm_ops = {
	.open		= kernfs_vma_open,
	.fault		= kernfs_vma_fault,
	.page_mkwrite	= kernfs_vma_page_mkwrite,
	.access		= kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy	= kernfs_vma_set_policy,
	.get_policy	= kernfs_vma_get_policy,
	.migrate	= kernfs_vma_migrate,
#endif
};
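
/*
 * Invoke the implementation's ->mmap() and then substitute kernfs_vm_ops
 * for the vm_ops it installed, so that each vm operation re-acquires an
 * active reference before calling into the implementation.
 */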
static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add spurious locking dependency
	 * between the two.  Check whether mmap is actually implemented
	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
	 * comment in kernfs_fop_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = 1;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}

/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one.  @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;

retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}

/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list.  If
 * reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (of)
		list_del(&of->list);

	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
	else
		on = NULL;

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}
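
/*
 * Open path: optionally re-check permissions against what the ops
 * actually implement, allocate the kernfs_open_file, always set up a
 * seq_file so that private_data access is uniform, and hook into the
 * node's kernfs_open_node.
 */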
static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
	struct kernfs_root *root = kernfs_root(kn);
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* see the flag definition for details */
	if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
		if ((file->f_mode & FMODE_WRITE) &&
		    (!(inode->i_mode & S_IWUGO) || !has_write))
			goto err_out;

		if ((file->f_mode & FMODE_READ) &&
		    (!(inode->i_mode & S_IRUGO) || !has_read))
			goto err_out;
	}

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap.  This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_sem nests, while holding @of->mutex.  As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file.  At this point, we can't easily give
	 * each file a separate locking class.  Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * Both paths of the branch look the same.  They're supposed to
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * Write path needs atomic_write_len outside active reference.
	 * Cache it in open_file.  See kernfs_fop_write() for details.
	 */
	of->atomic_write_len = ops->atomic_write_len;

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	((struct seq_file *)file->private_data)->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_close;

	/* open succeeded, put active references */
	kernfs_put_active(kn);
	return 0;

err_close:
	seq_release(inode, file);
err_free:
	kfree(of);
err_out:
	kernfs_put_active(kn);
	return error;
}
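
/*
 * Release is the inverse of open: detach from the kernfs_open_node,
 * tear down the seq_file and free the kernfs_open_file.
 */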
static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_file *of = kernfs_of(filp);

	kernfs_put_open_node(kn, of);
	seq_release(inode, filp);
	kfree(of);

	return 0;
}
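
/*
 * Unmap every existing mapping of @kn's file so that userspace can no
 * longer access the pages of a node that is going away.  The open_node
 * refcount is pinned across the walk so the files list can't be freed
 * underneath us.
 */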
void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & KERNFS_HAS_MMAP))
		return;

	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);
	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);
		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
	}
	mutex_unlock(&kernfs_open_file_mutex);

	kernfs_put_open_node(kn, NULL);
}

/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (neither 'poll' nor 'select' returns
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_node *on = kn->attr.open;

	/* need parent for the kobj, grab both */
	if (!kernfs_get_active(kn))
		goto trigger;

	poll_wait(filp, &on->poll, wait);

	kernfs_put_active(kn);

	if (of->event != atomic_read(&on->event))
		goto trigger;

	return DEFAULT_POLLMASK;

trigger:
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
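
/*
 * Bounce work for kernfs_notify().  Pops kernfs_nodes off
 * kernfs_notify_list one at a time, wakes pollers, generates an fsnotify
 * FS_MODIFY event on every superblock the node appears in, and drops the
 * reference taken by kernfs_notify().
 */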
static void kernfs_notify_workfn(struct work_struct *work)
{
	struct kernfs_node *kn;
	struct kernfs_open_node *on;
	struct kernfs_super_info *info;
repeat:
	/* pop one off the notify_list */
	spin_lock_irq(&kernfs_notify_lock);
	kn = kernfs_notify_list;
	if (kn == KERNFS_NOTIFY_EOL) {
		spin_unlock_irq(&kernfs_notify_lock);
		return;
	}
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;
	spin_unlock_irq(&kernfs_notify_lock);

	/* kick poll */
	spin_lock_irq(&kernfs_open_node_lock);

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->event);
		wake_up_interruptible(&on->poll);
	}

	spin_unlock_irq(&kernfs_open_node_lock);

	/* kick fsnotify */
	mutex_lock(&kernfs_mutex);

	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
		struct inode *inode;
		struct dentry *dentry;

		inode = ilookup(info->sb, kn->ino);
		if (!inode)
			continue;

		dentry = d_find_any_alias(inode);
		if (dentry) {
			fsnotify_parent(NULL, dentry, FS_MODIFY);
			fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
				 NULL, 0);
			dput(dentry);
		}

		iput(inode);
	}

	mutex_unlock(&kernfs_mutex);
	kernfs_put(kn);
	goto repeat;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
	unsigned long flags;

	if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
		return;

	spin_lock_irqsave(&kernfs_notify_lock, flags);
	if (!kn->attr.notify_next) {
		kernfs_get(kn);
		kn->attr.notify_next = kernfs_notify_list;
		kernfs_notify_list = kn;
		schedule_work(&kernfs_notify_work);
	}
	spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);

const struct file_operations kernfs_file_fops = {
	.read		= kernfs_fop_read,
	.write		= kernfs_fop_write,
	.llseek		= generic_file_llseek,
	.mmap		= kernfs_fop_mmap,
	.open		= kernfs_fop_open,
	.release	= kernfs_fop_release,
	.poll		= kernfs_fop_poll,
};
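
/*
 * Illustrative sketch (not part of this file): a typical caller defines a
 * kernfs_ops with just ->seq_show and creates the file through the
 * kernfs_create_file() wrapper from <linux/kernfs.h>.  The "foo" names
 * below are hypothetical.
 *
 *	static int foo_seq_show(struct seq_file *sf, void *v)
 *	{
 *		seq_puts(sf, "hello\n");
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops foo_ops = {
 *		.seq_show	= foo_seq_show,
 *	};
 *
 *	kn = kernfs_create_file(parent, "foo", 0444, 0, &foo_ops, NULL);
 */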

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @name_is_static: don't copy file name
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
					 const char *name,
					 umode_t mode, loff_t size,
					 const struct kernfs_ops *ops,
					 void *priv, const void *ns,
					 bool name_is_static,
					 struct lock_class_key *key)
{
	struct kernfs_node *kn;
	unsigned flags;
	int rc;

	flags = KERNFS_FILE;
	if (name_is_static)
		flags |= KERNFS_STATIC_NAME;

	kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding active ref.  We
	 * need to know whether some ops are implemented outside active
	 * ref.  Cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;

	rc = kernfs_add_one(kn);
	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}