pagemap.h

#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_BALLOON_MAP	= __GFP_BITS_SHIFT + 4,	/* balloon page special map */
	AS_EXITING	= __GFP_BITS_SHIFT + 5,	/* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
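
/*
 * Illustrative sketch (not part of the upstream header): callers such as
 * fsync paths typically report and clear these error bits with
 * test_and_clear_bit(), roughly as follows:
 *
 *	int err = 0;
 *
 *	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *		err = -ENOSPC;
 *	if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *		err = -EIO;
 *	return err;
 */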

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_balloon(struct address_space *mapping)
{
	set_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_clear_balloon(struct address_space *mapping)
{
	clear_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline int mapping_balloon(struct address_space *mapping)
{
	return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
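
/*
 * Usage sketch (illustrative, not part of the upstream header): filesystems
 * commonly restrict page cache allocations for an inode at initialisation
 * time, before the mapping is in use, e.g.:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
 *
 * and later allocate with whatever mask was installed:
 *
 *	page = __page_cache_alloc(mapping_gfp_mask(inode->i_mapping));
 */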

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
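
/*
 * Illustrative example (not part of the upstream header): a file position is
 * typically split into a page cache index and an offset within the page as
 *
 *	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 *	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
 *
 * and PAGE_CACHE_ALIGN(pos) rounds pos up to the next page boundary, e.g.
 * with 4096-byte pages PAGE_CACHE_ALIGN(5000) == 8192.
 */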

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
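
/*
 * Lookup-side sketch of the protocol above (illustrative; simplified from the
 * radix-tree lookup in mm/filemap.c, with swap/shadow entry handling and
 * error paths omitted):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;
 *		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
 *						       offset))) {
 *			page_cache_release(page);	(page moved, retry)
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */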

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
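
/*
 * Remove-side sketch of steps A-C above (illustrative; loosely modelled on
 * the reclaim path in mm/vmscan.c, with dirty/writeback checks and error
 * handling omitted). The expected refcount of 2 assumes one reference held
 * by the page cache and one held by the isolating caller:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	if (!page_freeze_refs(page, 2)) {
 *		spin_unlock_irq(&mapping->tree_lock);	(raced, keep the page)
 *		return 0;
 *	}
 *	__delete_from_page_cache(page, NULL);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	return 1;
 */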

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					 pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					       pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					  pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					       pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
}
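
/*
 * Usage sketch (illustrative, not part of the upstream header): a typical
 * caller takes the page locked, uses it, then drops both the lock and the
 * reference:
 *
 *	page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	... fill or modify the page ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */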

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
						  pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping),
			GFP_NOFS);
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
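
/*
 * Usage sketch (illustrative, not part of the upstream header):
 * read_mapping_page() returns either an uptodate page or an ERR_PTR, so
 * callers typically check with IS_ERR():
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page contents ...
 *	page_cache_release(page);
 */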

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);
	else
		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
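
/*
 * Worked example (illustrative, assuming 4096-byte pages so that
 * PAGE_CACHE_SHIFT == PAGE_SHIFT == 12): for a VMA with vm_start == 0x400000
 * and vm_pgoff == 0x10, the address 0x403000 maps to
 *
 *	pgoff = (0x403000 - 0x400000) >> 12	== 3
 *	pgoff += 0x10				== 0x13
 *
 * i.e. page index 0x13 within the backing file (the final shift is by zero
 * when the two shifts are equal).
 */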

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
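
/*
 * Usage sketch (illustrative, not part of the upstream header): page-lock
 * sections are bracketed by lock_page()/unlock_page(), and the killable
 * variant must have its return value checked:
 *
 *	lock_page(page);
 *	... page is held stable against truncation here ...
 *	unlock_page(page);
 *
 *	if (lock_page_killable(page))
 *		return -EINTR;
 *	...
 *	unlock_page(page);
 */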

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
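
/*
 * Usage sketch (illustrative, not part of the upstream header): a read path
 * that has submitted ->readpage() on a locked page waits for I/O completion
 * (which unlocks the page) and then checks the result:
 *
 *	wait_on_page_locked(page);	(caller holds a page reference)
 *	if (!PageUptodate(page)) {
 *		page_cache_release(page);
 *		return ERR_PTR(-EIO);
 *	}
 */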

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
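
/*
 * Usage sketch (illustrative, not part of the upstream header; buf, bytes,
 * kaddr, offset and left are the caller's locals): buffered write paths
 * prefault the source buffer before copying from it with page faults
 * disabled, shortening and retrying the copy if it still faults:
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 *	pagefault_enable();
 *	if (left)
 *		... reduce bytes and retry the iteration ...
 */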

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
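
/*
 * Usage sketch (illustrative, not part of the upstream header): readahead-
 * style callers allocate a fresh page and insert it, freeing it again if the
 * insertion fails (e.g. because another task already added a page at that
 * index):
 *
 *	page = page_cache_alloc_readahead(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	if (add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
 *		page_cache_release(page);
 *		return 0;	(someone else populated the slot)
 *	}
 *	... submit ->readpage() for the new, locked page ...
 */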

#endif /* _LINUX_PAGEMAP_H */