
/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/fsnotify.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node. attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file. kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
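/*
 * Editor's note: the pointer relationships described above, summarized
 * (derived from kernfs_of(), kernfs_get_open_node() and the uses of
 * kernfs_open_file in this file):
 *
 *   file->private_data  -> seq_file->private -> kernfs_open_file
 *   kernfs_open_file->kn -> kernfs_node
 *   kernfs_node->attr.open -> kernfs_open_node
 *   kernfs_open_node->files <- chained kernfs_open_file->list entries
 */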
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
        atomic_t                refcnt;
        atomic_t                event;
        wait_queue_head_t       poll;
        struct list_head        files; /* goes through kernfs_open_file.list */
};

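/*
 * Editor's note on the fields above, as used later in this file: @refcnt
 * counts open files (plus a transient hold taken in kernfs_unmap_bin_file()),
 * @event is bumped by kernfs_notify_workfn() and compared against
 * kernfs_open_file->event in kernfs_fop_poll(), and @poll is the wait queue
 * that poll(2) sleeps on and kernfs_notify() wakes.
 */
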
/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item. To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for NULL.
 */
#define KERNFS_NOTIFY_EOL       ((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;

static struct kernfs_open_file *kernfs_of(struct file *file)
{
        return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node. This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
        if (kn->flags & KERNFS_LOCKDEP)
                lockdep_assert_held(kn);
        return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure. The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it. As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated due to the optional custom seq_file
 * operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
 * custom seq_file operations and thus can't decide whether put_active
 * should be performed or not only on ERR_PTR(-ENODEV).
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * that kernfs_seq_stop_active() is skipped only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops = kernfs_ops(of->kn);

        if (ops->seq_stop)
                ops->seq_stop(sf, v);
        kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops;

        /*
         * @of->mutex nests outside active ref and is primarily to ensure that
         * the ops aren't called concurrently for the same open file.
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn))
                return ERR_PTR(-ENODEV);

        ops = kernfs_ops(of->kn);
        if (ops->seq_start) {
                void *next = ops->seq_start(sf, ppos);
                /* see the comment above kernfs_seq_stop_active() */
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
        } else {
                /*
                 * The same behavior and code as single_open(). Returns
                 * !NULL if pos is at the beginning; otherwise, NULL.
                 */
                return NULL + !*ppos;
        }
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops = kernfs_ops(of->kn);

        if (ops->seq_next) {
                void *next = ops->seq_next(sf, v, ppos);
                /* see the comment above kernfs_seq_stop_active() */
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
        } else {
                /*
                 * The same behavior and code as single_open(), always
                 * terminate after the initial read.
                 */
                ++*ppos;
                return NULL;
        }
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        if (v != ERR_PTR(-ENODEV))
                kernfs_seq_stop_active(sf, v);
        mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        of->event = atomic_read(&of->kn->attr.open->event);

        return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
        .start = kernfs_seq_start,
        .next = kernfs_seq_next,
        .stop = kernfs_seq_stop,
        .show = kernfs_seq_show,
};

/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file. Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
                                       char __user *user_buf, size_t count,
                                       loff_t *ppos)
{
        ssize_t len = min_t(size_t, count, PAGE_SIZE);
        const struct kernfs_ops *ops;
        char *buf;

        buf = of->prealloc_buf;
        if (!buf)
                buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /*
         * @of->mutex nests outside active ref and is used both to ensure that
         * the ops aren't called concurrently for the same open file, and
         * to provide exclusive access to ->prealloc_buf (when that exists).
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn)) {
                len = -ENODEV;
                mutex_unlock(&of->mutex);
                goto out_free;
        }

        ops = kernfs_ops(of->kn);
        if (ops->read)
                len = ops->read(of, buf, len, *ppos);
        else
                len = -EINVAL;

        if (len < 0)
                goto out_unlock;

        if (copy_to_user(user_buf, buf, len)) {
                len = -EFAULT;
                goto out_unlock;
        }

        *ppos += len;

out_unlock:
        kernfs_put_active(of->kn);
        mutex_unlock(&of->mutex);
out_free:
        if (buf != of->prealloc_buf)
                kfree(buf);
        return len;
}

/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: userland buffer to read the data into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
                               size_t count, loff_t *ppos)
{
        struct kernfs_open_file *of = kernfs_of(file);

        if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
                return seq_read(file, user_buf, count, ppos);
        else
                return kernfs_file_direct_read(of, user_buf, count, ppos);
}

/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them. We expect the entire buffer to come on
 * the first write. Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
        size_t len;
        char *buf;

        if (of->atomic_write_len) {
                len = count;
                if (len > of->atomic_write_len)
                        return -E2BIG;
        } else {
                len = min_t(size_t, count, PAGE_SIZE);
        }

        buf = of->prealloc_buf;
        if (!buf)
                buf = kmalloc(len + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /*
         * @of->mutex nests outside active ref and is used both to ensure that
         * the ops aren't called concurrently for the same open file, and
         * to provide exclusive access to ->prealloc_buf (when that exists).
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn)) {
                mutex_unlock(&of->mutex);
                len = -ENODEV;
                goto out_free;
        }

        if (copy_from_user(buf, user_buf, len)) {
                len = -EFAULT;
                goto out_unlock;
        }
        buf[len] = '\0';        /* guarantee string termination */

        ops = kernfs_ops(of->kn);
        if (ops->write)
                len = ops->write(of, buf, len, *ppos);
        else
                len = -EINVAL;

        if (len > 0)
                *ppos += len;

out_unlock:
        kernfs_put_active(of->kn);
        mutex_unlock(&of->mutex);
out_free:
        if (buf != of->prealloc_buf)
                kfree(buf);
        return len;
}

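/*
 * Editor's note: the kernfs_vma_*() helpers below wrap the vm_operations
 * installed by a kernfs user's mmap implementation (cached in of->vm_ops by
 * kernfs_fop_mmap()). Each wrapper checks that of->vm_ops exists, pins the
 * node with kernfs_get_active() before forwarding the callback, and drops
 * the reference afterwards, so the underlying implementation is never
 * invoked on a deactivated node.
 */
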
static void kernfs_vma_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);

        if (!of->vm_ops)
                return;

        if (!kernfs_get_active(of->kn))
                return;

        if (of->vm_ops->open)
                of->vm_ops->open(vma);

        kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return VM_FAULT_SIGBUS;

        if (!kernfs_get_active(of->kn))
                return VM_FAULT_SIGBUS;

        ret = VM_FAULT_SIGBUS;
        if (of->vm_ops->fault)
                ret = of->vm_ops->fault(vma, vmf);

        kernfs_put_active(of->kn);
        return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
                                   struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return VM_FAULT_SIGBUS;

        if (!kernfs_get_active(of->kn))
                return VM_FAULT_SIGBUS;

        ret = 0;
        if (of->vm_ops->page_mkwrite)
                ret = of->vm_ops->page_mkwrite(vma, vmf);
        else
                file_update_time(file);

        kernfs_put_active(of->kn);
        return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
                             void *buf, int len, int write)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return -EINVAL;

        if (!kernfs_get_active(of->kn))
                return -EINVAL;

        ret = -EINVAL;
        if (of->vm_ops->access)
                ret = of->vm_ops->access(vma, addr, buf, len, write);

        kernfs_put_active(of->kn);
        return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
                                 struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return 0;

        if (!kernfs_get_active(of->kn))
                return -EINVAL;

        ret = 0;
        if (of->vm_ops->set_policy)
                ret = of->vm_ops->set_policy(vma, new);

        kernfs_put_active(of->kn);
        return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
                                               unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        struct mempolicy *pol;

        if (!of->vm_ops)
                return vma->vm_policy;

        if (!kernfs_get_active(of->kn))
                return vma->vm_policy;

        pol = vma->vm_policy;
        if (of->vm_ops->get_policy)
                pol = of->vm_ops->get_policy(vma, addr);

        kernfs_put_active(of->kn);
        return pol;
}
#endif

static const struct vm_operations_struct kernfs_vm_ops = {
        .open           = kernfs_vma_open,
        .fault          = kernfs_vma_fault,
        .page_mkwrite   = kernfs_vma_page_mkwrite,
        .access         = kernfs_vma_access,
#ifdef CONFIG_NUMA
        .set_policy     = kernfs_vma_set_policy,
        .get_policy     = kernfs_vma_get_policy,
#endif
};

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
        int rc;

        /*
         * mmap path and of->mutex are prone to triggering spurious lockdep
         * warnings and we don't want to add spurious locking dependency
         * between the two. Check whether mmap is actually implemented
         * without grabbing @of->mutex by testing HAS_MMAP flag. See the
         * comment in kernfs_fop_open() for more details.
         */
        if (!(of->kn->flags & KERNFS_HAS_MMAP))
                return -ENODEV;

        mutex_lock(&of->mutex);

        rc = -ENODEV;
        if (!kernfs_get_active(of->kn))
                goto out_unlock;

        ops = kernfs_ops(of->kn);
        rc = ops->mmap(of, vma);
        if (rc)
                goto out_put;

        /*
         * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
         * to satisfy versions of X which crash if the mmap fails: that
         * substitutes a new vm_file, and we don't then want bin_vm_ops.
         */
        if (vma->vm_file != file)
                goto out_put;

        rc = -EINVAL;
        if (of->mmapped && of->vm_ops != vma->vm_ops)
                goto out_put;

        /*
         * It is not possible to successfully wrap close.
         * So error if someone is trying to use close.
         */
        rc = -EINVAL;
        if (vma->vm_ops && vma->vm_ops->close)
                goto out_put;

        rc = 0;
        of->mmapped = 1;
        of->vm_ops = vma->vm_ops;
        vma->vm_ops = &kernfs_vm_ops;
out_put:
        kernfs_put_active(of->kn);
out_unlock:
        mutex_unlock(&of->mutex);

        return rc;
}

/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one. @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
                                struct kernfs_open_file *of)
{
        struct kernfs_open_node *on, *new_on = NULL;

retry:
        mutex_lock(&kernfs_open_file_mutex);
        spin_lock_irq(&kernfs_open_node_lock);

        if (!kn->attr.open && new_on) {
                kn->attr.open = new_on;
                new_on = NULL;
        }

        on = kn->attr.open;
        if (on) {
                atomic_inc(&on->refcnt);
                list_add_tail(&of->list, &on->files);
        }

        spin_unlock_irq(&kernfs_open_node_lock);
        mutex_unlock(&kernfs_open_file_mutex);

        if (on) {
                kfree(new_on);
                return 0;
        }

        /* not there, initialize a new one and retry */
        new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
        if (!new_on)
                return -ENOMEM;

        atomic_set(&new_on->refcnt, 0);
        atomic_set(&new_on->event, 1);
        init_waitqueue_head(&new_on->poll);
        INIT_LIST_HEAD(&new_on->files);
        goto retry;
}

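/*
 * Editor's note: the retry loop above is the usual optimistic allocation
 * pattern. The first pass runs with no preallocated node; if @kn has no
 * kernfs_open_node yet, the locks are dropped, a new node is allocated with
 * GFP_KERNEL (which may sleep and therefore can't happen under the
 * spinlock), and the function retries. Whichever pass finds @kn already
 * populated frees its now-unneeded allocation via kfree(new_on).
 */
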
/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list. If
 * reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
                                 struct kernfs_open_file *of)
{
        struct kernfs_open_node *on = kn->attr.open;
        unsigned long flags;

        mutex_lock(&kernfs_open_file_mutex);
        spin_lock_irqsave(&kernfs_open_node_lock, flags);

        if (of)
                list_del(&of->list);

        if (atomic_dec_and_test(&on->refcnt))
                kn->attr.open = NULL;
        else
                on = NULL;

        spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
        mutex_unlock(&kernfs_open_file_mutex);

        kfree(on);
}

static int kernfs_fop_open(struct inode *inode, struct file *file)
{
        struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
        struct kernfs_root *root = kernfs_root(kn);
        const struct kernfs_ops *ops;
        struct kernfs_open_file *of;
        bool has_read, has_write, has_mmap;
        int error = -EACCES;

        if (!kernfs_get_active(kn))
                return -ENODEV;

        ops = kernfs_ops(kn);

        has_read = ops->seq_show || ops->read || ops->mmap;
        has_write = ops->write || ops->mmap;
        has_mmap = ops->mmap;

        /* see the flag definition for details */
        if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
                if ((file->f_mode & FMODE_WRITE) &&
                    (!(inode->i_mode & S_IWUGO) || !has_write))
                        goto err_out;

                if ((file->f_mode & FMODE_READ) &&
                    (!(inode->i_mode & S_IRUGO) || !has_read))
                        goto err_out;
        }

        /* allocate a kernfs_open_file for the file */
        error = -ENOMEM;
        of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
        if (!of)
                goto err_out;

        /*
         * The following is done to give a different lockdep key to
         * @of->mutex for files which implement mmap. This is a rather
         * crude way to avoid false positive lockdep warning around
         * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
         * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
         * which mm->mmap_sem nests, while holding @of->mutex. As each
         * open file has a separate mutex, it's okay as long as those don't
         * happen on the same file. At this point, we can't easily give
         * each file a separate locking class. Let's differentiate on
         * whether the file has mmap or not for now.
         *
         * Both paths of the branch look the same. They're supposed to
         * look that way and give @of->mutex different static lockdep keys.
         */
        if (has_mmap)
                mutex_init(&of->mutex);
        else
                mutex_init(&of->mutex);

        of->kn = kn;
        of->file = file;

        /*
         * Write path needs to access atomic_write_len outside of the
         * active reference. Cache it in open_file. See kernfs_fop_write()
         * for details.
         */
        of->atomic_write_len = ops->atomic_write_len;

        error = -EINVAL;
        /*
         * ->seq_show is incompatible with ->prealloc,
         * as seq_read does its own allocation.
         * ->read must be used instead.
         */
        if (ops->prealloc && ops->seq_show)
                goto err_free;
        if (ops->prealloc) {
                int len = of->atomic_write_len ?: PAGE_SIZE;
                of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
                error = -ENOMEM;
                if (!of->prealloc_buf)
                        goto err_free;
        }

        /*
         * Always instantiate seq_file even if read access doesn't use
         * seq_file or is not requested. This unifies private data access
         * and readable regular files are the vast majority anyway.
         */
        if (ops->seq_show)
                error = seq_open(file, &kernfs_seq_ops);
        else
                error = seq_open(file, NULL);
        if (error)
                goto err_free;

        ((struct seq_file *)file->private_data)->private = of;

        /* seq_file clears PWRITE unconditionally, restore it if WRITE */
        if (file->f_mode & FMODE_WRITE)
                file->f_mode |= FMODE_PWRITE;

        /* make sure we have open node struct */
        error = kernfs_get_open_node(kn, of);
        if (error)
                goto err_close;

        /* open succeeded, put active references */
        kernfs_put_active(kn);
        return 0;

err_close:
        seq_release(inode, file);
err_free:
        kfree(of->prealloc_buf);
        kfree(of);
err_out:
        kernfs_put_active(kn);
        return error;
}

static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
        struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
        struct kernfs_open_file *of = kernfs_of(filp);

        kernfs_put_open_node(kn, of);
        seq_release(inode, filp);
        kfree(of->prealloc_buf);
        kfree(of);

        return 0;
}

void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
        struct kernfs_open_node *on;
        struct kernfs_open_file *of;

        if (!(kn->flags & KERNFS_HAS_MMAP))
                return;

        spin_lock_irq(&kernfs_open_node_lock);
        on = kn->attr.open;
        if (on)
                atomic_inc(&on->refcnt);
        spin_unlock_irq(&kernfs_open_node_lock);
        if (!on)
                return;

        mutex_lock(&kernfs_open_file_mutex);
        list_for_each_entry(of, &on->files, list) {
                struct inode *inode = file_inode(of->file);
                unmap_mapping_range(inode->i_mapping, 0, 0, 1);
        }
        mutex_unlock(&kernfs_open_file_mutex);

        kernfs_put_open_node(kn, NULL);
}

/*
 * Kernfs attribute files are pollable. The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change. When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (Neither 'poll' nor 'select' return
 * an appropriate error code). When in doubt, set a suitable timeout value.
 */
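/*
 * Editor's note: a minimal userspace sketch of the protocol described
 * above (the attribute path and buffer size are hypothetical, not part of
 * this file). The file is read first, then poll(2) waits for
 * POLLERR|POLLPRI; after a wakeup the reader seeks back to 0 and reads the
 * new value.
 *
 *      int fd = open("/sys/.../some_attr", O_RDONLY);
 *      char buf[128];
 *      ssize_t n = read(fd, buf, sizeof(buf) - 1);     // primes of->event
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
 *      while (poll(&pfd, 1, -1) > 0) {
 *              lseek(fd, 0, SEEK_SET);                 // re-read from offset 0
 *              n = read(fd, buf, sizeof(buf) - 1);
 *              // ... consume the updated value ...
 *      }
 */
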
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
        struct kernfs_open_file *of = kernfs_of(filp);
        struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
        struct kernfs_open_node *on = kn->attr.open;

        /* need parent for the kobj, grab both */
        if (!kernfs_get_active(kn))
                goto trigger;

        poll_wait(filp, &on->poll, wait);

        kernfs_put_active(kn);

        if (of->event != atomic_read(&on->event))
                goto trigger;

        return DEFAULT_POLLMASK;

trigger:
        return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}

static void kernfs_notify_workfn(struct work_struct *work)
{
        struct kernfs_node *kn;
        struct kernfs_open_node *on;
        struct kernfs_super_info *info;
repeat:
        /* pop one off the notify_list */
        spin_lock_irq(&kernfs_notify_lock);
        kn = kernfs_notify_list;
        if (kn == KERNFS_NOTIFY_EOL) {
                spin_unlock_irq(&kernfs_notify_lock);
                return;
        }
        kernfs_notify_list = kn->attr.notify_next;
        kn->attr.notify_next = NULL;
        spin_unlock_irq(&kernfs_notify_lock);

        /* kick poll */
        spin_lock_irq(&kernfs_open_node_lock);

        on = kn->attr.open;
        if (on) {
                atomic_inc(&on->event);
                wake_up_interruptible(&on->poll);
        }

        spin_unlock_irq(&kernfs_open_node_lock);

        /* kick fsnotify */
        mutex_lock(&kernfs_mutex);

        list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
                struct inode *inode;
                struct dentry *dentry;

                inode = ilookup(info->sb, kn->ino);
                if (!inode)
                        continue;

                dentry = d_find_any_alias(inode);
                if (dentry) {
                        fsnotify_parent(NULL, dentry, FS_MODIFY);
                        fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
                                 NULL, 0);
                        dput(dentry);
                }

                iput(inode);
        }

        mutex_unlock(&kernfs_mutex);
        kernfs_put(kn);
        goto repeat;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up. May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
        static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
        unsigned long flags;

        if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
                return;

        spin_lock_irqsave(&kernfs_notify_lock, flags);
        if (!kn->attr.notify_next) {
                kernfs_get(kn);
                kn->attr.notify_next = kernfs_notify_list;
                kernfs_notify_list = kn;
                schedule_work(&kernfs_notify_work);
        }
        spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);

const struct file_operations kernfs_file_fops = {
        .read           = kernfs_fop_read,
        .write          = kernfs_fop_write,
        .llseek         = generic_file_llseek,
        .mmap           = kernfs_fop_mmap,
        .open           = kernfs_fop_open,
        .release        = kernfs_fop_release,
        .poll           = kernfs_fop_poll,
};

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
                                         const char *name,
                                         umode_t mode, loff_t size,
                                         const struct kernfs_ops *ops,
                                         void *priv, const void *ns,
                                         struct lock_class_key *key)
{
        struct kernfs_node *kn;
        unsigned flags;
        int rc;

        flags = KERNFS_FILE;

        kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
        if (!kn)
                return ERR_PTR(-ENOMEM);

        kn->attr.ops = ops;
        kn->attr.size = size;
        kn->ns = ns;
        kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (key) {
                lockdep_init_map(&kn->dep_map, "s_active", key, 0);
                kn->flags |= KERNFS_LOCKDEP;
        }
#endif

        /*
         * kn->attr.ops is accessible only while holding active ref. We
         * need to know whether some ops are implemented outside active
         * ref. Cache their existence in flags.
         */
        if (ops->seq_show)
                kn->flags |= KERNFS_HAS_SEQ_SHOW;
        if (ops->mmap)
                kn->flags |= KERNFS_HAS_MMAP;

        rc = kernfs_add_one(kn);
        if (rc) {
                kernfs_put(kn);
                return ERR_PTR(rc);
        }
        return kn;
}
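
#if 0   /* Editor's note: illustrative sketch only, compiled out; not part of kernfs itself. */
/*
 * A minimal, hypothetical example of how a kernfs user might publish a
 * read-only integer attribute through the API implemented above. All
 * example_* names are made up for illustration. Only ->seq_show is
 * provided, so reads go through kernfs_seq_ops, and kernfs_notify() wakes
 * any poll(2) waiters after the value changes.
 */
static int example_seq_show(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private; /* set up in kernfs_fop_open() */
        int *value = of->kn->priv;                 /* @priv passed at creation time */

        seq_printf(sf, "%d\n", *value);
        return 0;
}

static const struct kernfs_ops example_ops = {
        .seq_show       = example_seq_show,
};

static int example_value;

static struct kernfs_node *example_create(struct kernfs_node *parent)
{
        /* 0444: world-readable; no write op is provided */
        return __kernfs_create_file(parent, "example", 0444, 0, &example_ops,
                                    &example_value, NULL, NULL);
}

static void example_update(struct kernfs_node *kn, int new_value)
{
        example_value = new_value;
        kernfs_notify(kn);      /* bump ->event and wake pollers */
}
#endif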