/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
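/*
 * In sketch form, the pointer chain for one open file is:
 *
 *      file->private_data -> seq_file, seq_file->private -> kernfs_open_file
 *      kernfs_open_file->list is chained on kernfs_open_node->files
 *      kernfs_node->attr.open -> kernfs_open_node (refcounted)
 */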
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
        atomic_t                refcnt;
        atomic_t                event;
        wait_queue_head_t       poll;
        struct list_head        files; /* goes through kernfs_open_file.list */
};
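/*
 * Dig out the kernfs_open_file stashed in the seq_file's ->private; see
 * the layout comment at the top of this file.
 */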
static struct kernfs_open_file *kernfs_of(struct file *file)
{
        return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
        if (kn->flags & KERNFS_LOCKDEP)
                lockdep_assert_held(kn);
        return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure.  The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it.  As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated due to the optional custom seq_file
 * operations which may return ERR_PTR(-ENODEV) too.  kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
 * custom seq_file operations and thus can't decide whether put_active
 * should be performed or not only on ERR_PTR(-ENODEV).
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * that kernfs_seq_stop_active() is skipped only after get_active failure.
 */
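/*
 * A compressed sketch of the three stop paths described above:
 *
 *      1. get_active fails:    start/next return ERR_PTR(-ENODEV) without
 *                              taking an active ref; kernfs_seq_stop()
 *                              skips kernfs_seq_stop_active().
 *      2. custom op fails with ERR_PTR(-ENODEV): start/next call
 *                              kernfs_seq_stop_active() immediately;
 *                              kernfs_seq_stop() then skips it, so
 *                              put_active runs exactly once.
 *      3. anything else:       kernfs_seq_stop() calls
 *                              kernfs_seq_stop_active() as usual.
 */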
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops = kernfs_ops(of->kn);

        if (ops->seq_stop)
                ops->seq_stop(sf, v);
        kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops;

        /*
         * @of->mutex nests outside active ref and is just to ensure that
         * the ops aren't called concurrently for the same open file.
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn))
                return ERR_PTR(-ENODEV);

        ops = kernfs_ops(of->kn);
        if (ops->seq_start) {
                void *next = ops->seq_start(sf, ppos);
                /* see the comment above kernfs_seq_stop_active() */
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
        } else {
                /*
                 * The same behavior and code as single_open().  Returns
                 * !NULL if pos is at the beginning; otherwise, NULL.
                 */
                return NULL + !*ppos;
        }
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops = kernfs_ops(of->kn);

        if (ops->seq_next) {
                void *next = ops->seq_next(sf, v, ppos);
                /* see the comment above kernfs_seq_stop_active() */
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
        } else {
                /*
                 * The same behavior and code as single_open(), always
                 * terminate after the initial read.
                 */
                ++*ppos;
                return NULL;
        }
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        if (v != ERR_PTR(-ENODEV))
                kernfs_seq_stop_active(sf, v);
        mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        of->event = atomic_read(&of->kn->attr.open->event);

        return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
        .start = kernfs_seq_start,
        .next = kernfs_seq_next,
        .stop = kernfs_seq_stop,
        .show = kernfs_seq_show,
};
/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file.  Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
                                       char __user *user_buf, size_t count,
                                       loff_t *ppos)
{
        ssize_t len = min_t(size_t, count, PAGE_SIZE);
        const struct kernfs_ops *ops;
        char *buf;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /*
         * @of->mutex nests outside active ref and is just to ensure that
         * the ops aren't called concurrently for the same open file.
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn)) {
                len = -ENODEV;
                mutex_unlock(&of->mutex);
                goto out_free;
        }

        ops = kernfs_ops(of->kn);
        if (ops->read)
                len = ops->read(of, buf, len, *ppos);
        else
                len = -EINVAL;

        kernfs_put_active(of->kn);
        mutex_unlock(&of->mutex);

        if (len < 0)
                goto out_free;

        if (copy_to_user(user_buf, buf, len)) {
                len = -EFAULT;
                goto out_free;
        }

        *ppos += len;

out_free:
        kfree(buf);
        return len;
}
/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: userland buffer to read data into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
                               size_t count, loff_t *ppos)
{
        struct kernfs_open_file *of = kernfs_of(file);

        if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
                return seq_read(file, user_buf, count, ppos);
        else
                return kernfs_file_direct_read(of, user_buf, count, ppos);
}
/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them.  We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
        size_t len;
        char *buf;

        if (of->atomic_write_len) {
                len = count;
                if (len > of->atomic_write_len)
                        return -E2BIG;
        } else {
                len = min_t(size_t, count, PAGE_SIZE);
        }

        buf = kmalloc(len + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, user_buf, len)) {
                len = -EFAULT;
                goto out_free;
        }
        buf[len] = '\0';        /* guarantee string termination */

        /*
         * @of->mutex nests outside active ref and is just to ensure that
         * the ops aren't called concurrently for the same open file.
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn)) {
                mutex_unlock(&of->mutex);
                len = -ENODEV;
                goto out_free;
        }

        ops = kernfs_ops(of->kn);
        if (ops->write)
                len = ops->write(of, buf, len, *ppos);
        else
                len = -EINVAL;

        kernfs_put_active(of->kn);
        mutex_unlock(&of->mutex);

        if (len > 0)
                *ppos += len;
out_free:
        kfree(buf);
        return len;
}
static void kernfs_vma_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);

        if (!of->vm_ops)
                return;

        if (!kernfs_get_active(of->kn))
                return;

        if (of->vm_ops->open)
                of->vm_ops->open(vma);

        kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return VM_FAULT_SIGBUS;

        if (!kernfs_get_active(of->kn))
                return VM_FAULT_SIGBUS;

        ret = VM_FAULT_SIGBUS;
        if (of->vm_ops->fault)
                ret = of->vm_ops->fault(vma, vmf);

        kernfs_put_active(of->kn);
        return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
                                   struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return VM_FAULT_SIGBUS;

        if (!kernfs_get_active(of->kn))
                return VM_FAULT_SIGBUS;

        ret = 0;
        if (of->vm_ops->page_mkwrite)
                ret = of->vm_ops->page_mkwrite(vma, vmf);
        else
                file_update_time(file);

        kernfs_put_active(of->kn);
        return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
                             void *buf, int len, int write)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return -EINVAL;

        if (!kernfs_get_active(of->kn))
                return -EINVAL;

        ret = -EINVAL;
        if (of->vm_ops->access)
                ret = of->vm_ops->access(vma, addr, buf, len, write);

        kernfs_put_active(of->kn);
        return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
                                 struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return 0;

        if (!kernfs_get_active(of->kn))
                return -EINVAL;

        ret = 0;
        if (of->vm_ops->set_policy)
                ret = of->vm_ops->set_policy(vma, new);

        kernfs_put_active(of->kn);
        return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
                                               unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        struct mempolicy *pol;

        if (!of->vm_ops)
                return vma->vm_policy;

        if (!kernfs_get_active(of->kn))
                return vma->vm_policy;

        pol = vma->vm_policy;
        if (of->vm_ops->get_policy)
                pol = of->vm_ops->get_policy(vma, addr);

        kernfs_put_active(of->kn);
        return pol;
}

static int kernfs_vma_migrate(struct vm_area_struct *vma,
                              const nodemask_t *from, const nodemask_t *to,
                              unsigned long flags)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return 0;

        if (!kernfs_get_active(of->kn))
                return 0;

        ret = 0;
        if (of->vm_ops->migrate)
                ret = of->vm_ops->migrate(vma, from, to, flags);

        kernfs_put_active(of->kn);
        return ret;
}
#endif
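/*
 * The wrappers above forward each operation to the vm_ops installed by
 * the kernfs user's ->mmap() callback, taking an active reference on the
 * kernfs_node for the duration so the underlying ops can't go away
 * mid-call.
 */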
static const struct vm_operations_struct kernfs_vm_ops = {
        .open           = kernfs_vma_open,
        .fault          = kernfs_vma_fault,
        .page_mkwrite   = kernfs_vma_page_mkwrite,
        .access         = kernfs_vma_access,
#ifdef CONFIG_NUMA
        .set_policy     = kernfs_vma_set_policy,
        .get_policy     = kernfs_vma_get_policy,
        .migrate        = kernfs_vma_migrate,
#endif
};

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
        int rc;

        /*
         * mmap path and of->mutex are prone to triggering spurious lockdep
         * warnings and we don't want to add spurious locking dependency
         * between the two.  Check whether mmap is actually implemented
         * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
         * comment in kernfs_fop_open() for more details.
         */
        if (!(of->kn->flags & KERNFS_HAS_MMAP))
                return -ENODEV;

        mutex_lock(&of->mutex);

        rc = -ENODEV;
        if (!kernfs_get_active(of->kn))
                goto out_unlock;

        ops = kernfs_ops(of->kn);
        rc = ops->mmap(of, vma);
        if (rc)
                goto out_put;

        /*
         * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
         * to satisfy versions of X which crash if the mmap fails: that
         * substitutes a new vm_file, and we don't then want bin_vm_ops.
         */
        if (vma->vm_file != file)
                goto out_put;

        rc = -EINVAL;
        if (of->mmapped && of->vm_ops != vma->vm_ops)
                goto out_put;

        /*
         * It is not possible to successfully wrap close.
         * So error if someone is trying to use close.
         */
        rc = -EINVAL;
        if (vma->vm_ops && vma->vm_ops->close)
                goto out_put;

        rc = 0;
        of->mmapped = 1;
        of->vm_ops = vma->vm_ops;
        vma->vm_ops = &kernfs_vm_ops;
out_put:
        kernfs_put_active(of->kn);
out_unlock:
        mutex_unlock(&of->mutex);

        return rc;
}
/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one.  @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
                                struct kernfs_open_file *of)
{
        struct kernfs_open_node *on, *new_on = NULL;

retry:
        mutex_lock(&kernfs_open_file_mutex);
        spin_lock_irq(&kernfs_open_node_lock);

        if (!kn->attr.open && new_on) {
                kn->attr.open = new_on;
                new_on = NULL;
        }

        on = kn->attr.open;
        if (on) {
                atomic_inc(&on->refcnt);
                list_add_tail(&of->list, &on->files);
        }

        spin_unlock_irq(&kernfs_open_node_lock);
        mutex_unlock(&kernfs_open_file_mutex);

        if (on) {
                kfree(new_on);
                return 0;
        }

        /* not there, initialize a new one and retry */
        new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
        if (!new_on)
                return -ENOMEM;

        atomic_set(&new_on->refcnt, 0);
        atomic_set(&new_on->event, 1);
        init_waitqueue_head(&new_on->poll);
        INIT_LIST_HEAD(&new_on->files);
        goto retry;
}
/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list.  If
 * reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
                                 struct kernfs_open_file *of)
{
        struct kernfs_open_node *on = kn->attr.open;
        unsigned long flags;

        mutex_lock(&kernfs_open_file_mutex);
        spin_lock_irqsave(&kernfs_open_node_lock, flags);

        if (of)
                list_del(&of->list);

        if (atomic_dec_and_test(&on->refcnt))
                kn->attr.open = NULL;
        else
                on = NULL;

        spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
        mutex_unlock(&kernfs_open_file_mutex);

        kfree(on);
}
static int kernfs_fop_open(struct inode *inode, struct file *file)
{
        struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
        const struct kernfs_ops *ops;
        struct kernfs_open_file *of;
        bool has_read, has_write, has_mmap;
        int error = -EACCES;

        if (!kernfs_get_active(kn))
                return -ENODEV;

        ops = kernfs_ops(kn);

        has_read = ops->seq_show || ops->read || ops->mmap;
        has_write = ops->write || ops->mmap;
        has_mmap = ops->mmap;

        /* check perms and supported operations */
        if ((file->f_mode & FMODE_WRITE) &&
            (!(inode->i_mode & S_IWUGO) || !has_write))
                goto err_out;

        if ((file->f_mode & FMODE_READ) &&
            (!(inode->i_mode & S_IRUGO) || !has_read))
                goto err_out;

        /* allocate a kernfs_open_file for the file */
        error = -ENOMEM;
        of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
        if (!of)
                goto err_out;

        /*
         * The following is done to give a different lockdep key to
         * @of->mutex for files which implement mmap.  This is a rather
         * crude way to avoid false positive lockdep warning around
         * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
         * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
         * which mm->mmap_sem nests, while holding @of->mutex.  As each
         * open file has a separate mutex, it's okay as long as those don't
         * happen on the same file.  At this point, we can't easily give
         * each file a separate locking class.  Let's differentiate on
         * whether the file has mmap or not for now.
         *
         * Both paths of the branch look the same.  They're supposed to
         * look that way and give @of->mutex different static lockdep keys.
         */
        if (has_mmap)
                mutex_init(&of->mutex);
        else
                mutex_init(&of->mutex);

        of->kn = kn;
        of->file = file;

        /*
         * Write path needs to check atomic_write_len outside the active
         * reference.  Cache it in open_file.  See kernfs_fop_write() for
         * details.
         */
        of->atomic_write_len = ops->atomic_write_len;

        /*
         * Always instantiate seq_file even if read access doesn't use
         * seq_file or is not requested.  This unifies private data access
         * and readable regular files are the vast majority anyway.
         */
        if (ops->seq_show)
                error = seq_open(file, &kernfs_seq_ops);
        else
                error = seq_open(file, NULL);
        if (error)
                goto err_free;

        ((struct seq_file *)file->private_data)->private = of;

        /* seq_file clears PWRITE unconditionally, restore it if WRITE */
        if (file->f_mode & FMODE_WRITE)
                file->f_mode |= FMODE_PWRITE;

        /* make sure we have open node struct */
        error = kernfs_get_open_node(kn, of);
        if (error)
                goto err_close;

        /* open succeeded, put active references */
        kernfs_put_active(kn);
        return 0;

err_close:
        seq_release(inode, file);
err_free:
        kfree(of);
err_out:
        kernfs_put_active(kn);
        return error;
}
static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
        struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
        struct kernfs_open_file *of = kernfs_of(filp);

        kernfs_put_open_node(kn, of);
        seq_release(inode, filp);
        kfree(of);

        return 0;
}
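/*
 * Unmap all userspace mappings of a KERNFS_HAS_MMAP node.  A temporary
 * reference on the open_node keeps the ->files list valid while it is
 * walked; kernfs_put_open_node() drops that reference at the end.
 */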
void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
        struct kernfs_open_node *on;
        struct kernfs_open_file *of;

        if (!(kn->flags & KERNFS_HAS_MMAP))
                return;

        spin_lock_irq(&kernfs_open_node_lock);
        on = kn->attr.open;
        if (on)
                atomic_inc(&on->refcnt);
        spin_unlock_irq(&kernfs_open_node_lock);
        if (!on)
                return;

        mutex_lock(&kernfs_open_file_mutex);
        list_for_each_entry(of, &on->files, list) {
                struct inode *inode = file_inode(of->file);
                unmap_mapping_range(inode->i_mapping, 0, 0, 1);
        }
        mutex_unlock(&kernfs_open_file_mutex);

        kernfs_put_open_node(kn, NULL);
}
/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (neither 'poll' nor 'select' returns
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
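/*
 * A sketch of the userspace pattern described above (the attribute path
 * and timeout are hypothetical):
 *
 *      int fd = open("/sys/devices/.../some_attr", O_RDONLY);
 *      char buf[128];
 *
 *      read(fd, buf, sizeof(buf));             // primes of->event
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLERR | POLLPRI };
 *      poll(&pfd, 1, timeout_ms);              // blocks until kernfs_notify()
 *
 *      lseek(fd, 0, SEEK_SET);                 // then seek to 0 ...
 *      read(fd, buf, sizeof(buf));             // ... and read again
 */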
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
        struct kernfs_open_file *of = kernfs_of(filp);
        struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
        struct kernfs_open_node *on = kn->attr.open;

        /* need parent for the kobj, grab both */
        if (!kernfs_get_active(kn))
                goto trigger;

        poll_wait(filp, &on->poll, wait);

        kernfs_put_active(kn);

        if (of->event != atomic_read(&on->event))
                goto trigger;

        return DEFAULT_POLLMASK;

trigger:
        return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.
 */
void kernfs_notify(struct kernfs_node *kn)
{
        struct kernfs_open_node *on;
        unsigned long flags;

        spin_lock_irqsave(&kernfs_open_node_lock, flags);

        if (!WARN_ON(kernfs_type(kn) != KERNFS_FILE)) {
                on = kn->attr.open;
                if (on) {
                        atomic_inc(&on->event);
                        wake_up_interruptible(&on->poll);
                }
        }

        spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
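/*
 * Usage sketch: a kernfs user which has updated the state backing a file
 * calls kernfs_notify() on that file's kernfs_node.  The event-counter
 * bump makes the next kernfs_fop_poll() see a value differing from
 * of->event and report POLLERR|POLLPRI, and the wake-up releases anyone
 * already sleeping in poll(2).
 */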
const struct file_operations kernfs_file_fops = {
        .read           = kernfs_fop_read,
        .write          = kernfs_fop_write,
        .llseek         = generic_file_llseek,
        .mmap           = kernfs_fop_mmap,
        .open           = kernfs_fop_open,
        .release        = kernfs_fop_release,
        .poll           = kernfs_fop_poll,
};
/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @name_is_static: don't copy file name
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
                                         const char *name,
                                         umode_t mode, loff_t size,
                                         const struct kernfs_ops *ops,
                                         void *priv, const void *ns,
                                         bool name_is_static,
                                         struct lock_class_key *key)
{
        struct kernfs_node *kn;
        unsigned flags;
        int rc;

        flags = KERNFS_FILE;
        if (name_is_static)
                flags |= KERNFS_STATIC_NAME;

        kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
        if (!kn)
                return ERR_PTR(-ENOMEM);

        kn->attr.ops = ops;
        kn->attr.size = size;
        kn->ns = ns;
        kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (key) {
                lockdep_init_map(&kn->dep_map, "s_active", key, 0);
                kn->flags |= KERNFS_LOCKDEP;
        }
#endif

        /*
         * kn->attr.ops is accessible only while holding active ref.  We
         * need to know whether some ops are implemented outside active
         * ref.  Cache their existence in flags.
         */
        if (ops->seq_show)
                kn->flags |= KERNFS_HAS_SEQ_SHOW;
        if (ops->mmap)
                kn->flags |= KERNFS_HAS_MMAP;

        rc = kernfs_add_one(kn);
        if (rc) {
                kernfs_put(kn);
                return ERR_PTR(rc);
        }
        return kn;
}