#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= __GFP_BITS_SHIFT + 4,	/* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
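
/*
 * Usage sketch (illustrative, not part of this header): a writeback
 * completion path records a failure on the mapping, and a later
 * fsync()-style check harvests the sticky bits.  The consumer below
 * mirrors what mm/filemap.c does with test_and_clear_bit(); the
 * function name is hypothetical.
 *
 *	static int example_check_errors(struct address_space *mapping)
 *	{
 *		int ret = 0;
 *
 *		if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *			ret = -ENOSPC;
 *		if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *			ret = -EIO;
 *		return ret;
 *	}
 *
 *	// In a writeback error path:
 *	//	mapping_set_error(mapping, -EIO);
 */
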
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
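
/*
 * Usage sketch (illustrative): a filesystem that must not recurse into
 * itself under memory pressure clears __GFP_FS on its mapping at inode
 * setup time, before the mapping is in use (the helper is non-atomic):
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 *
 * Later allocations then honour the restriction via
 * mapping_gfp_constraint(), e.g.:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 */
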
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

void release_pages(struct page **pages, int nr, bool cold);
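
/*
 * Worked example (illustrative): converting a byte range [pos, pos + len)
 * into page cache indices with the macros above.
 *
 *	pgoff_t first = pos >> PAGE_CACHE_SHIFT;
 *	pgoff_t last  = (pos + len - 1) >> PAGE_CACHE_SHIFT;
 *	unsigned long aligned_end = PAGE_CACHE_ALIGN(pos + len);
 *
 * With 4 KB pages, pos = 5000 and len = 100 gives first = last = 1 and
 * aligned_end = 8192.
 */
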
/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);
#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
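
/*
 * Sketch of the lookup-side pattern described above (illustrative only;
 * the real implementation lives in mm/filemap.c around find_get_entry()).
 * radix_tree_lookup() stands in for the slot handling the real code does,
 * and the function name is hypothetical:
 *
 *	static struct page *example_find_get_page(struct address_space *mapping,
 *						  pgoff_t offset)
 *	{
 *		struct page *page;
 *
 *		rcu_read_lock();
 *	repeat:
 *		page = radix_tree_lookup(&mapping->page_tree, offset);	// step 1
 *		if (page) {
 *			if (!page_cache_get_speculative(page))		// step 2
 *				goto repeat;
 *			// step 3: recheck the page is still at this slot
 *			if (page != radix_tree_lookup(&mapping->page_tree,
 *						      offset)) {
 *				page_cache_release(page);
 *				goto repeat;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return page;
 *	}
 */
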
/*
 * Same as above, but add instead of inc (could just be merged).
 *
 * Guard harmonized with page_cache_get_speculative() above: TINY_RCU is
 * the !SMP RCU flavour, so it is the condition under which preemption
 * disabling alone pins the refcount.
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);
#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);
/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
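
/*
 * Usage sketch (illustrative): reading one page of a file through the
 * mapping's ->readpage and checking for errors.  read_mapping_page()
 * returns an ERR_PTR() on failure, so it must be tested with IS_ERR():
 *
 *	struct page *page = read_mapping_page(inode->i_mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... use the page contents, e.g. via kmap(page)/kunmap(page) ...
 *	page_cache_release(page);
 */
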
/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	pgoff_t pgoff;

	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	if (likely(!PageTransTail(page)))
		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
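
/*
 * Worked example (illustrative): with 4 KB pages, a VMA starting at
 * vm_start = 0x700000000000 that maps a file from page offset
 * vm_pgoff = 16, a fault at address 0x700000003000 resolves to:
 *
 *	pgoff = (0x700000003000 - 0x700000000000) >> PAGE_SHIFT;  // = 3
 *	pgoff += 16;                                              // = 19
 *
 * i.e. the 20th page of the file backs that address.  With
 * PAGE_CACHE_SHIFT == PAGE_SHIFT the final shift is a no-op.
 */
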
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
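
/*
 * Usage sketch (illustrative): the common lock-and-revalidate pattern.
 * A page found by lookup may have been truncated before the lock was
 * acquired, so callers re-check page->mapping under the page lock:
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		lock_page(page);
 *		if (unlikely(page->mapping != mapping)) {
 *			// truncated while we waited; drop and retry
 *			unlock_page(page);
 *			page_cache_release(page);
 *			goto retry;
 *		}
 *		// ... page is locked and still attached to mapping ...
 *		unlock_page(page);
 *		page_cache_release(page);
 *	}
 */
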
/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
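
/*
 * Usage sketch (illustrative): a write path that must not touch a page
 * while it is under writeback locks the page and then waits for any
 * writeback in flight to finish:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	// ... safe to modify the page and mark it dirty ...
 *	set_page_dirty(page);
 *	unlock_page(page);
 *
 * wait_for_stable_page() below wraps this kind of wait for backing
 * devices that require pages to stay unchanged during writeback.
 */
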
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
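
/*
 * Usage sketch (illustrative): the classic deadlock-avoidance pattern in
 * a write path.  The user buffer is prefaulted *before* the page lock is
 * taken, so the copy under the lock is unlikely to fault on a page this
 * very write path holds locked:
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *
 *	lock_page(page);
 *	// copy from the (now probably resident) user buffer
 *	left = __copy_from_user_inatomic(kaddr, buf, bytes);
 *	unlock_page(page);
 *
 * This is best-effort: the user page can still be reclaimed between the
 * prefault and the copy, which is why callers loop on a short copy.
 */
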
/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted.  These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
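
/*
 * Usage sketch (illustrative): a readahead-style path allocates a fresh
 * page, inserts it locked into the page cache and LRU, and kicks off the
 * read.  -EEXIST from the insert means someone else won the race:
 *
 *	struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
 *
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, index,
 *				    mapping_gfp_constraint(mapping, GFP_KERNEL));
 *	if (ret) {
 *		page_cache_release(page);
 *		return ret == -EEXIST ? 0 : ret;
 *	}
 *	ret = mapping->a_ops->readpage(file, page);  // unlocks page when done
 */
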
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >>
			       PAGE_CACHE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */