/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        filemap_set_wb_err(mapping, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
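
/*
 * Illustrative sketch (not part of this header): a filesystem's writeback
 * completion path would typically record a failed write roughly like this,
 * so that a later fsync(2) on the file reports the error.  The
 * my_end_writeback() name and its arguments are hypothetical.
 *
 *      static void my_end_writeback(struct page *page, int err)
 *      {
 *              if (err)
 *                      mapping_set_error(page->mapping, err);
 *              end_page_writeback(page);
 *      }
 */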

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
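
/*
 * Illustrative sketch (not part of this header) of the lookup-side pattern
 * described above, for a caller that does its own radix-tree lookup under
 * rcu_read_lock().  The my_radix_lookup() helper is hypothetical:
 *
 *      rcu_read_lock();
 *      repeat:
 *      page = my_radix_lookup(mapping, offset);                // step 1
 *      if (page) {
 *              if (!page_cache_get_speculative(page))          // step 2
 *                      goto repeat;
 *              if (unlikely(page != my_radix_lookup(mapping, offset))) {
 *                      put_page(page);                         // step 3 failed
 *                      goto repeat;
 *              }
 *      }
 *      rcu_read_unlock();
 */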

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) |
                __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}
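
/*
 * Illustrative sketch (not part of this header): typical use of
 * find_or_create_page() by a filesystem that wants page @idx of an inode's
 * mapping, creating it if necessary, and drops the lock and reference when
 * done.  The fill_page_contents() helper is hypothetical.
 *
 *      page = find_or_create_page(inode->i_mapping, idx,
 *                                 mapping_gfp_mask(inode->i_mapping));
 *      if (!page)
 *              return -ENOMEM;
 *      if (!PageUptodate(page))
 *              fill_page_contents(page);
 *      unlock_page(page);
 *      put_page(page);
 */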

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, int tag, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, int tag, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        int tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
                        pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
                        pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                        struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
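
/*
 * Illustrative sketch (not part of this header): read_cache_page() and hence
 * read_mapping_page() return an ERR_PTR() on failure rather than NULL, so
 * callers typically check for that before using the page:
 *
 *      page = read_mapping_page(mapping, index, NULL);
 *      if (IS_ERR(page))
 *              return PTR_ERR(page);
 *      ...                             // use page contents
 *      put_page(page);
 */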

/*
 * Get the index of the page within the radix-tree.
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Get the page's offset within the file, in units of PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
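
/*
 * Illustrative sketch (not part of this header): a caller that must hold the
 * page lock but wants to remain killable typically pairs lock_page_killable()
 * with unlock_page() like this:
 *
 *      err = lock_page_killable(page);
 *      if (err)
 *              return err;             // -EINTR: fatal signal while waiting
 *      ...                             // page is locked here
 *      unlock_page(page);
 */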

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
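
/*
 * Illustrative sketch (not part of this header): callers that need a page not
 * to be under I/O (e.g. before truncating or invalidating it) commonly lock
 * it and then wait for any writeback in flight to finish:
 *
 *      lock_page(page);
 *      wait_on_page_writeback(page);
 *      ...                             // page is not under writeback here
 *      unlock_page(page);
 */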

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
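
/*
 * Illustrative sketch (not part of this header): buffered-write paths
 * typically pre-fault the user buffer with fault_in_pages_readable() before
 * taking locks under which a page fault must not be serviced, and retry if
 * the later atomic copy still comes up short.  The my_copy_atomic() helper
 * is hypothetical:
 *
 *      again:
 *      if (unlikely(fault_in_pages_readable(buf, bytes)))
 *              return -EFAULT;
 *      ...                             // lock page, disable pagefaults
 *      copied = my_copy_atomic(page, buf, bytes);
 *      ...                             // re-enable pagefaults, unlock page
 *      if (unlikely(copied == 0))
 *              goto again;
 */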

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
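
/*
 * Illustrative sketch (not part of this header): readahead-style code
 * typically allocates a fresh page and inserts it with add_to_page_cache_lru()
 * (which, unlike add_to_page_cache(), also puts the page on the LRU list):
 *
 *      page = __page_cache_alloc(readahead_gfp_mask(mapping));
 *      if (!page)
 *              return -ENOMEM;
 *      if (add_to_page_cache_lru(page, mapping, offset,
 *                                mapping_gfp_constraint(mapping, GFP_KERNEL))) {
 *              put_page(page);         // e.g. a page already exists at offset
 *              return 0;
 *      }
 */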

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */