/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
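
/*
 * Illustrative sketch (not code from this file): the VFS open path is
 * expected to zero the readahead state and then call
 * file_ra_state_init(), roughly:
 *
 *	memset(&f->f_ra, 0, sizeof(f->f_ra));
 *	file_ra_state_init(&f->f_ra, f->f_mapping);
 *
 * f_ra and f_mapping are real struct file fields; the exact call site
 * and arguments in the open code may differ.
 */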

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}
EXPORT_SYMBOL(read_cache_pages);
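
/*
 * Illustrative use (a sketch under assumed names, not code from this
 * file): a filesystem's ->readpages() can hand its page list straight
 * to read_cache_pages() with a per-page filler callback:
 *
 *	static int my_filler(void *data, struct page *page)
 *	{
 *		struct file *file = data;
 *		return my_fs_read_one_page(file, page);
 *	}
 *
 *	static int my_readpages(struct file *file,
 *				struct address_space *mapping,
 *				struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, my_filler, file);
 *	}
 *
 * my_filler, my_readpages and my_fs_read_one_page are hypothetical names.
 */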

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	struct blk_plug plug;
	unsigned page_idx;
	int ret;

	blk_start_plug(&plug);

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;

out:
	blk_finish_plug(&plug);

	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
 * the pages first, then submits them all for I/O.  This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_readahead(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
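
/*
 * Worked example of the PG_readahead marking above (computed from the
 * loop condition): with nr_to_read = 8 and lookahead_size = 4, the page
 * at page_idx 4 (i.e. offset + 4) is marked, so an async readahead
 * fires once the reader is 4 pages from the end of this window.  With
 * lookahead_size == nr_to_read the very first page is marked, giving
 * maximum pipelining.
 */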

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0)
			return err;

		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return 0;
}
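
/*
 * Worked example of the chunking above (arithmetic only): with 4 KB
 * pages, this_chunk is (2 * 1024 * 1024) / 4096 = 512 pages, so a 5 MB
 * (1280 page) request is submitted as chunks of 512, 512 and 256 pages
 * rather than pinning all 1280 pages at once.
 */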

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
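
/*
 * Example (arithmetic only): on a node with 40000 free pages and 20000
 * inactive file pages, the cap is (20000 + 40000) / 2 = 30000 pages, so
 * a 50000 page request would be trimmed to 30000.
 */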

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size, rounding the request up to the next power
 * of 2: x 4 for small requests, x 2 for medium ones, and capped at max
 * for large ones (see the worked example below the function).
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
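
/*
 * Worked example (computed from the code above) with max = 32 pages,
 * i.e. a 128k max ra on 4 KB pages:
 *
 *	req_size 1-2  -> roundup 1-2   -> window 4 pages  (16k)
 *	req_size 3-4  -> roundup 4     -> window 8 pages  (32k)
 *	req_size 5-8  -> roundup 8     -> window 16 pages (64k)
 *	req_size > 8  -> roundup >= 16 -> window 32 pages (128k)
 */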

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
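
/*
 * Worked example (computed from the code above) with max = 32 pages: a
 * window of 4 pages grows 4 -> 8 -> 16 -> 32 on successive sequential
 * hits (cur >= max/16 = 2, so each step doubles), then stays capped at
 * 32.  Only windows below 2 pages would grow fourfold.
 */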

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed
 * all readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator.  The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */
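
/*
 * Illustrative trace of the pipelining described above (a sketch, with
 * max = 32 pages and a strictly sequential 1-page-at-a-time reader):
 * the first read at page 0 sets up the initial window
 * {start=0, size=4, async_size=3} and marks page 1 with PG_readahead.
 * When the application later reads page 1, the marker fires and the
 * next window {start=4, size=8, async_size=8} is submitted
 * asynchronously, before the reader ever blocks on page 4.
 */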

/*
 * Count contiguously cached pages from @offset-1 to @offset-@max; this
 * count is a conservative estimate of
 * - the length of the sequential read sequence, or
 * - the thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   struct file_ra_state *ra,
				   pgoff_t offset, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = radix_tree_prev_hole(&mapping->page_tree, offset - 1, max);
	rcu_read_unlock();

	return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t offset,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, ra, offset, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= offset)
		size *= 2;

	ra->start = offset;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
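
/*
 * Worked example (computed from the code above): a read of req_size = 8
 * pages at offset = 100, with pages 84-99 already cached, yields
 * size = count_history_pages() = 16 > 8, so a context readahead window
 * {start=100, size=min(16+8, max), async_size=1} is set up.  Had fewer
 * than 8 history pages been cached, the read would be treated as random.
 */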

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);
	pgoff_t prev_offset;

	/*
	 * start of file
	 */
	if (!offset)
		goto initial_readahead;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((offset == (ra->start + ra->size - ra->async_size) ||
	     offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = radix_tree_next_hole(&mapping->page_tree, offset + 1, max);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (offset - prev_offset) == 1
	 * unaligned reads: (offset - prev_offset) == 0
	 */
	prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT;
	if (offset - prev_offset <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, offset, req_size, max))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 */
	if (offset == ra->start && ra->size == ra->async_size) {
		ra->async_size = get_next_ra_size(ra, max);
		ra->size += ra->async_size;
	}

	return ra_submit(ra, mapping, filp);
}
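
/*
 * Illustrative note on the merge at `readit' above (computed from the
 * code): an oversize sequential read, e.g. req_size = 64 with max = 32,
 * takes the initial_readahead path with size = async_size = 32 at
 * offset == ra->start, so the window and its own marker window are
 * merged and submitted as one larger request instead of two
 * back-to-back ones.
 */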

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, offset, req_size);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
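
/*
 * Illustrative call site (a sketch of the generic read path, not code
 * from this file): on a page cache miss the reader calls the sync
 * variant and then retries the lookup:
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 */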

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
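
/*
 * Illustrative call site (a sketch of the generic read path, not code
 * from this file): when a cached page carries the PG_readahead marker,
 * the reader kicks off the next window before using the page:
 *
 *	if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, ra, filp, page,
 *					   index, last_index - index);
 */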

static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops)
		return -EINVAL;

	return force_page_cache_readahead(mapping, filp, index, nr);
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (f.file) {
		if (f.file->f_mode & FMODE_READ) {
			struct address_space *mapping = f.file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, f.file, start, len);
		}
		fdput(f);
	}
	return ret;
}
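
/*
 * Illustrative userspace use of the syscall above (a sketch, not kernel
 * code; "data.bin" is a hypothetical file):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	if (fd >= 0)
 *		readahead(fd, 0, 2 * 1024 * 1024);
 *
 * This populates the page cache with the first 2 MB of the file so a
 * subsequent read() can be served from memory.
 */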