pagemap.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        filemap_set_wb_err(mapping, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}

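/*
 * Example: a writeback completion path might record a failed write like
 * this, so that a later fsync(2) on any file open against the mapping sees
 * the error.  Illustrative sketch only; example_record_writeback_error()
 * is a hypothetical helper, not an API defined elsewhere.
 */
static inline void example_record_writeback_error(struct page *page, int err)
{
        if (unlikely(err)) {
                /* Mark the page itself, then latch the error in the mapping. */
                SetPageError(page);
                mapping_set_error(page->mapping, err);
        }
}
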
static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

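/*
 * Example: combine the mapping's allocation constraints with a caller-side
 * restriction, here forbidding filesystem recursion for one allocation.
 * Illustrative sketch only; example_alloc_page_nofs() is a hypothetical
 * helper.
 */
static inline struct page *example_alloc_page_nofs(struct address_space *mapping)
{
        gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL & ~__GFP_FS);

        return alloc_pages(gfp, 0);
}
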
void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 *  1. find page in radix tree
 *  2. conditionally increment refcount
 *  3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 *  A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 *  B. remove page from pagecache
 *  C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);
#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}

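/*
 * Example: the lookup side of the lockless protocol described above, in the
 * shape used by find_get_page().  This is a deliberately simplified sketch
 * (the real lookup goes through radix_tree_lookup_slot() and handles more
 * entry types); example_lockless_lookup() is a hypothetical helper.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
                                                   pgoff_t index)
{
        struct page *page;

        rcu_read_lock();
repeat:
        /* 1. find page in radix tree */
        page = radix_tree_lookup(&mapping->i_pages, index);
        if (page && !radix_tree_exceptional_entry(page)) {
                /* 2. conditionally increment refcount */
                if (!page_cache_get_speculative(page))
                        goto repeat;
                /* 3. check the page is still in pagecache (if not, retry) */
                if (unlikely(page != radix_tree_lookup(&mapping->i_pages, index))) {
                        put_page(page);
                        goto repeat;
                }
        } else {
                /* Not present, or a shadow/DAX entry: treat as not cached. */
                page = NULL;
        }
        rcu_read_unlock();

        return page;
}
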
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);
#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                         pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

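/*
 * Example: look up a cached page, inspect it, and drop the reference taken
 * by find_get_page().  Illustrative sketch only; example_page_is_cached()
 * is a hypothetical helper.
 */
static inline bool example_page_is_cached(struct address_space *mapping,
                                          pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);
        bool uptodate = false;

        if (page) {
                uptodate = PageUptodate(page);
                put_page(page);
        }
        return uptodate;
}
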
static inline struct page *find_get_page_flags(struct address_space *mapping,
                                               pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                          pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                               pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                  gfp_mask);
}

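/*
 * Example: a find_or_create_page() variant built from the same
 * pagecache_get_page() flags, additionally passing FGP_NOFS so that any
 * allocation cannot recurse into the filesystem.  Illustrative sketch only;
 * example_find_or_create_page_nofs() is a hypothetical helper.  As with
 * find_or_create_page(), the caller must unlock and release the returned
 * page.
 */
static inline struct page *example_find_or_create_page_nofs(struct address_space *mapping,
                                                            pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOFS,
                                  mapping_gfp_mask(mapping));
}
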
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                                  pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                                  mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                              pgoff_t end, unsigned int nr_pages,
                              struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                                      pgoff_t *start, unsigned int nr_pages,
                                      struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                                  pgoff_t end, int tag, unsigned int nr_pages,
                                  struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                                          pgoff_t *index, int tag,
                                          unsigned int nr_pages,
                                          struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                              int tag, unsigned int nr_entries,
                              struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                                         pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                           pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                     pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                         pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                            struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}

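/*
 * Example: read one page of a mapping through its ->readpage and report
 * success or failure.  read_cache_page() returns an ERR_PTR() on failure and
 * an unlocked, uptodate page otherwise.  Illustrative sketch only;
 * example_read_one_page() is a hypothetical helper.
 */
static inline int example_read_one_page(struct address_space *mapping,
                                        pgoff_t index)
{
        struct page *page = read_mapping_page(mapping, index, NULL);

        if (IS_ERR(page))
                return PTR_ERR(page);

        put_page(page);
        return 0;
}
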
/*
 * Get index of the page within the radix-tree
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;

        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}

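/*
 * Example: with 4 KiB pages, a VMA whose vm_pgoff is 256 (file offset 1 MiB)
 * maps the faulting address vm_start + 8192 to file page index
 * (8192 >> PAGE_SHIFT) + 256 == 258, i.e. byte offset 258 << PAGE_SHIFT.
 * Illustrative sketch only; example_fault_file_offset() is a hypothetical
 * helper.
 */
static inline loff_t example_fault_file_offset(struct vm_area_struct *vma,
                                               unsigned long address)
{
        return (loff_t)linear_page_index(vma, address) << PAGE_SHIFT;
}
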
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

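/*
 * Example: the usual pattern for inspecting a page's pagecache state under
 * the page lock, backing out cleanly if a fatal signal arrives while
 * sleeping.  Illustrative sketch only; example_page_still_cached() is a
 * hypothetical helper; it returns 1/0, or -EINTR if interrupted.
 */
static inline int example_page_still_cached(struct page *page)
{
        int ret = lock_page_killable(page);

        if (ret)
                return ret;

        /* ->mapping cannot change while we hold the page lock. */
        ret = page->mapping != NULL;
        unlock_page(page);
        return ret;
}
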
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

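/*
 * Example: the common "quiesce a page before changing it" sequence: take the
 * page lock, then drain any writeback still in flight so the contents are
 * stable.  Illustrative sketch only; example_quiesce_page() is a
 * hypothetical helper; the caller is expected to unlock_page() when done.
 */
static inline void example_quiesce_page(struct page *page)
{
        lock_page(page);
        wait_on_page_writeback(page);
}
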
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
            ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
            ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;

        return 0;
}

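/*
 * Example: the write-path ordering these helpers exist for: fault the user
 * buffer in *before* taking any page lock, then do the copy through a
 * kmap_atomic() mapping (which disables page faults) so it cannot block on
 * the very page being held.  Illustrative sketch only;
 * example_copy_from_user_page() is a hypothetical helper and assumes
 * 0 <= bytes <= PAGE_SIZE.
 */
static inline int example_copy_from_user_page(struct page *page,
                                              const char __user *buf, int bytes)
{
        char *kaddr;
        unsigned long left;

        if (fault_in_pages_readable(buf, bytes))
                return -EFAULT;

        kaddr = kmap_atomic(page);
        left = __copy_from_user_inatomic(kaddr, buf, bytes);
        kunmap_atomic(kaddr);

        return left ? -EFAULT : 0;
}
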
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                             pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                          pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}

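/*
 * Example: how a read path typically instantiates a missing pagecache page:
 * allocate it, then insert it into both the pagecache and the LRU in one go.
 * On success the page is returned locked with an elevated refcount; on
 * failure (including losing a race with a concurrent insertion) the new page
 * is released and the caller should redo the lookup.  Illustrative sketch
 * only; example_add_new_page() is a hypothetical helper.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
                                                pgoff_t index, gfp_t gfp_mask)
{
        struct page *page = __page_cache_alloc(gfp_mask);

        if (!page)
                return NULL;

        if (add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
                put_page(page);
                return NULL;
        }
        return page;
}
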
static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
               PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */