  1. /*
  2. * linux/mm/filemap.c
  3. *
  4. * Copyright (C) 1994-1999 Linus Torvalds
  5. */
  6. /*
  7. * This file handles the generic file mmap semantics used by
  8. * most "normal" filesystems (but you don't /have/ to use this:
  9. * the NFS filesystem used to do this differently, for example)
  10. */
  11. #include <linux/config.h>
  12. #include <linux/module.h>
  13. #include <linux/slab.h>
  14. #include <linux/compiler.h>
  15. #include <linux/fs.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/aio.h>
  18. #include <linux/capability.h>
  19. #include <linux/kernel_stat.h>
  20. #include <linux/mm.h>
  21. #include <linux/swap.h>
  22. #include <linux/mman.h>
  23. #include <linux/pagemap.h>
  24. #include <linux/file.h>
  25. #include <linux/uio.h>
  26. #include <linux/hash.h>
  27. #include <linux/writeback.h>
  28. #include <linux/pagevec.h>
  29. #include <linux/blkdev.h>
  30. #include <linux/security.h>
  31. #include <linux/syscalls.h>
  32. #include <linux/cpuset.h>
  33. #include "filemap.h"
  34. #include "internal.h"
  35. /*
  36. * FIXME: remove all knowledge of the buffer layer from the core VM
  37. */
  38. #include <linux/buffer_head.h> /* for generic_osync_inode */
  39. #include <asm/mman.h>
  40. static ssize_t
  41. generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  42. loff_t offset, unsigned long nr_segs);
  43. /*
  44. * Shared mappings implemented 30.11.1994. It's not fully working yet,
  45. * though.
  46. *
  47. * Shared mappings now work. 15.8.1995 Bruno.
  48. *
  49. * finished 'unifying' the page and buffer cache and SMP-threaded the
  50. * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
  51. *
  52. * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
  53. */
  54. /*
  55. * Lock ordering:
  56. *
  57. * ->i_mmap_lock (vmtruncate)
  58. * ->private_lock (__free_pte->__set_page_dirty_buffers)
  59. * ->swap_lock (exclusive_swap_page, others)
  60. * ->mapping->tree_lock
  61. *
  62. * ->i_mutex
  63. * ->i_mmap_lock (truncate->unmap_mapping_range)
  64. *
  65. * ->mmap_sem
  66. * ->i_mmap_lock
  67. * ->page_table_lock or pte_lock (various, mainly in memory.c)
  68. * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
  69. *
  70. * ->mmap_sem
  71. * ->lock_page (access_process_vm)
  72. *
  73. * ->mmap_sem
  74. * ->i_mutex (msync)
  75. *
  76. * ->i_mutex
  77. * ->i_alloc_sem (various)
  78. *
  79. * ->inode_lock
  80. * ->sb_lock (fs/fs-writeback.c)
  81. * ->mapping->tree_lock (__sync_single_inode)
  82. *
  83. * ->i_mmap_lock
  84. * ->anon_vma.lock (vma_adjust)
  85. *
  86. * ->anon_vma.lock
  87. * ->page_table_lock or pte_lock (anon_vma_prepare and various)
  88. *
  89. * ->page_table_lock or pte_lock
  90. * ->swap_lock (try_to_unmap_one)
  91. * ->private_lock (try_to_unmap_one)
  92. * ->tree_lock (try_to_unmap_one)
  93. * ->zone.lru_lock (follow_page->mark_page_accessed)
  94. * ->zone.lru_lock (check_pte_range->isolate_lru_page)
  95. * ->private_lock (page_remove_rmap->set_page_dirty)
  96. * ->tree_lock (page_remove_rmap->set_page_dirty)
  97. * ->inode_lock (page_remove_rmap->set_page_dirty)
  98. * ->inode_lock (zap_pte_range->set_page_dirty)
  99. * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
  100. *
  101. * ->task->proc_lock
  102. * ->dcache_lock (proc_pid_lookup)
  103. */
  104. /*
  105. * Remove a page from the page cache and free it. Caller has to make
  106. * sure the page is locked and that nobody else uses it - or that usage
  107. * is safe. The caller must hold a write_lock on the mapping's tree_lock.
  108. */
  109. void __remove_from_page_cache(struct page *page)
  110. {
  111. struct address_space *mapping = page->mapping;
  112. radix_tree_delete(&mapping->page_tree, page->index);
  113. page->mapping = NULL;
  114. mapping->nrpages--;
  115. pagecache_acct(-1);
  116. }
  117. void remove_from_page_cache(struct page *page)
  118. {
  119. struct address_space *mapping = page->mapping;
  120. BUG_ON(!PageLocked(page));
  121. write_lock_irq(&mapping->tree_lock);
  122. __remove_from_page_cache(page);
  123. write_unlock_irq(&mapping->tree_lock);
  124. }
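/*
 * Illustrative sketch: a typical caller (the truncate path, for instance)
 * already holds the page lock and a reference of its own.  Note that
 * __remove_from_page_cache() does not drop the reference the page cache
 * itself held, so the caller releases that afterwards:
 *
 *	remove_from_page_cache(page);
 *	unlock_page(page);
 *	page_cache_release(page);
 */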
  125. static int sync_page(void *word)
  126. {
  127. struct address_space *mapping;
  128. struct page *page;
  129. page = container_of((unsigned long *)word, struct page, flags);
  130. /*
  131. * page_mapping() is being called without PG_locked held.
  132. * Some knowledge of the state and use of the page is used to
  133. * reduce the requirements down to a memory barrier.
  134. * The danger here is of a stale page_mapping() return value
  135. * indicating a struct address_space different from the one it's
  136. * associated with when it is associated with one.
  137. * After smp_mb(), it's either the correct page_mapping() for
  138. * the page, or an old page_mapping() and the page's own
  139. * page_mapping() has gone NULL.
  140. * The ->sync_page() address_space operation must tolerate
  141. * page_mapping() going NULL. By an amazing coincidence,
  142. * this comes about because none of the users of the page
  143. * in the ->sync_page() methods make essential use of the
  144. * page_mapping(), merely passing the page down to the backing
  145. * device's unplug functions when it's non-NULL, which in turn
  146. * ignore it for all cases but swap, where only page_private(page) is
  147. * of interest. When page_mapping() does go NULL, the entire
  148. * call stack gracefully ignores the page and returns.
  149. * -- wli
  150. */
  151. smp_mb();
  152. mapping = page_mapping(page);
  153. if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
  154. mapping->a_ops->sync_page(page);
  155. io_schedule();
  156. return 0;
  157. }
  158. /**
  159. * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  160. * @mapping: address space structure to write
  161. * @start: offset in bytes where the range starts
  162. * @end: offset in bytes where the range ends (inclusive)
  163. * @sync_mode: enable synchronous operation
  164. *
  165. * Start writeback against all of a mapping's dirty pages that lie
  166. * within the byte offsets <start, end> inclusive.
  167. *
  168. * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
  169. * opposed to a regular memory cleansing writeback. The difference between
  170. * these two operations is that if a dirty page/buffer is encountered, it must
  171. * be waited upon, and not just skipped over.
  172. */
  173. int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
  174. loff_t end, int sync_mode)
  175. {
  176. int ret;
  177. struct writeback_control wbc = {
  178. .sync_mode = sync_mode,
  179. .nr_to_write = mapping->nrpages * 2,
  180. .range_start = start,
  181. .range_end = end,
  182. };
  183. if (!mapping_cap_writeback_dirty(mapping))
  184. return 0;
  185. ret = do_writepages(mapping, &wbc);
  186. return ret;
  187. }
  188. static inline int __filemap_fdatawrite(struct address_space *mapping,
  189. int sync_mode)
  190. {
  191. return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
  192. }
  193. int filemap_fdatawrite(struct address_space *mapping)
  194. {
  195. return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
  196. }
  197. EXPORT_SYMBOL(filemap_fdatawrite);
  198. static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
  199. loff_t end)
  200. {
  201. return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
  202. }
  203. /**
  204. * filemap_flush - mostly a non-blocking flush
  205. * @mapping: target address_space
  206. *
  207. * This is a mostly non-blocking flush. Not suitable for data-integrity
  208. * purposes - I/O may not be started against all dirty pages.
  209. */
  210. int filemap_flush(struct address_space *mapping)
  211. {
  212. return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
  213. }
  214. EXPORT_SYMBOL(filemap_flush);
  215. /**
  216. * wait_on_page_writeback_range - wait for writeback to complete
  217. * @mapping: target address_space
  218. * @start: beginning page index
  219. * @end: ending page index
  220. *
  221. * Wait for writeback to complete against pages indexed by start->end
  222. * inclusive
  223. */
  224. int wait_on_page_writeback_range(struct address_space *mapping,
  225. pgoff_t start, pgoff_t end)
  226. {
  227. struct pagevec pvec;
  228. int nr_pages;
  229. int ret = 0;
  230. pgoff_t index;
  231. if (end < start)
  232. return 0;
  233. pagevec_init(&pvec, 0);
  234. index = start;
  235. while ((index <= end) &&
  236. (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
  237. PAGECACHE_TAG_WRITEBACK,
  238. min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
  239. unsigned i;
  240. for (i = 0; i < nr_pages; i++) {
  241. struct page *page = pvec.pages[i];
  242. /* until radix tree lookup accepts end_index */
  243. if (page->index > end)
  244. continue;
  245. wait_on_page_writeback(page);
  246. if (PageError(page))
  247. ret = -EIO;
  248. }
  249. pagevec_release(&pvec);
  250. cond_resched();
  251. }
  252. /* Check for outstanding write errors */
  253. if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
  254. ret = -ENOSPC;
  255. if (test_and_clear_bit(AS_EIO, &mapping->flags))
  256. ret = -EIO;
  257. return ret;
  258. }
  259. /**
  260. * sync_page_range - write and wait on all pages in the passed range
  261. * @inode: target inode
  262. * @mapping: target address_space
  263. * @pos: beginning byte offset to write
  264. * @count: number of bytes to write
  265. *
  266. * Write and wait upon all the pages in the passed range. This is a "data
  267. * integrity" operation. It waits upon in-flight writeout before starting and
  268. * waiting upon new writeout. If there was an IO error, return it.
  269. *
  270. * We need to re-take i_mutex during the generic_osync_inode list walk because
  271. * it is otherwise livelockable.
  272. */
  273. int sync_page_range(struct inode *inode, struct address_space *mapping,
  274. loff_t pos, loff_t count)
  275. {
  276. pgoff_t start = pos >> PAGE_CACHE_SHIFT;
  277. pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
  278. int ret;
  279. if (!mapping_cap_writeback_dirty(mapping) || !count)
  280. return 0;
  281. ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
  282. if (ret == 0) {
  283. mutex_lock(&inode->i_mutex);
  284. ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
  285. mutex_unlock(&inode->i_mutex);
  286. }
  287. if (ret == 0)
  288. ret = wait_on_page_writeback_range(mapping, start, end);
  289. return ret;
  290. }
  291. EXPORT_SYMBOL(sync_page_range);
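/*
 * Illustrative sketch: this is roughly how a generic O_SYNC write path uses
 * sync_page_range() once a buffered write (__generic_file_aio_write_nolock()
 * or similar) has returned the number of bytes written; the variable names
 * are for illustration only:
 *
 *	if (written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 *		ssize_t err;
 *
 *		err = sync_page_range(inode, mapping, pos, written);
 *		if (err < 0)
 *			written = err;
 *	}
 */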
  292. /**
  293. * sync_page_range_nolock
  294. * @inode: target inode
  295. * @mapping: target address_space
  296. * @pos: beginning byte offset to write
  297. * @count: number of bytes to write
  298. *
  299. * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
  300. * as it forces O_SYNC writers to different parts of the same file
  301. * to be serialised right until io completion.
  302. */
  303. int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
  304. loff_t pos, loff_t count)
  305. {
  306. pgoff_t start = pos >> PAGE_CACHE_SHIFT;
  307. pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
  308. int ret;
  309. if (!mapping_cap_writeback_dirty(mapping) || !count)
  310. return 0;
  311. ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
  312. if (ret == 0)
  313. ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
  314. if (ret == 0)
  315. ret = wait_on_page_writeback_range(mapping, start, end);
  316. return ret;
  317. }
  318. EXPORT_SYMBOL(sync_page_range_nolock);
  319. /**
  320. * filemap_fdatawait - wait for all under-writeback pages to complete
  321. * @mapping: address space structure to wait for
  322. *
  323. * Walk the list of under-writeback pages of the given address space
  324. * and wait for all of them.
  325. */
  326. int filemap_fdatawait(struct address_space *mapping)
  327. {
  328. loff_t i_size = i_size_read(mapping->host);
  329. if (i_size == 0)
  330. return 0;
  331. return wait_on_page_writeback_range(mapping, 0,
  332. (i_size - 1) >> PAGE_CACHE_SHIFT);
  333. }
  334. EXPORT_SYMBOL(filemap_fdatawait);
  335. int filemap_write_and_wait(struct address_space *mapping)
  336. {
  337. int err = 0;
  338. if (mapping->nrpages) {
  339. err = filemap_fdatawrite(mapping);
  340. /*
  341. * Even if the above returned error, the pages may be
  342. * written partially (e.g. -ENOSPC), so we wait for it.
  343. * But the -EIO is special case, it may indicate the worst
  344. * thing (e.g. bug) happened, so we avoid waiting for it.
  345. */
  346. if (err != -EIO) {
  347. int err2 = filemap_fdatawait(mapping);
  348. if (!err)
  349. err = err2;
  350. }
  351. }
  352. return err;
  353. }
  354. EXPORT_SYMBOL(filemap_write_and_wait);
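/*
 * Illustrative sketch: filemap_write_and_wait() is the usual way to get a
 * mapping clean and stable, for example in a filesystem's ->fsync() method
 * or before starting direct I/O against the file:
 *
 *	int err = filemap_write_and_wait(inode->i_mapping);
 *	if (err)
 *		return err;
 *	... then sync metadata, or issue the O_DIRECT transfer ...
 */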
  355. /**
  356. * filemap_write_and_wait_range - write out & wait on a file range
  357. * @mapping: the address_space for the pages
  358. * @lstart: offset in bytes where the range starts
  359. * @lend: offset in bytes where the range ends (inclusive)
  360. *
  361. * Write out and wait upon file offsets lstart->lend, inclusive.
  362. *
  363. * Note that `lend' is inclusive (describes the last byte to be written) so
  364. * that this function can be used to write to the very end-of-file (end = -1).
  365. */
  366. int filemap_write_and_wait_range(struct address_space *mapping,
  367. loff_t lstart, loff_t lend)
  368. {
  369. int err = 0;
  370. if (mapping->nrpages) {
  371. err = __filemap_fdatawrite_range(mapping, lstart, lend,
  372. WB_SYNC_ALL);
  373. /* See comment of filemap_write_and_wait() */
  374. if (err != -EIO) {
  375. int err2 = wait_on_page_writeback_range(mapping,
  376. lstart >> PAGE_CACHE_SHIFT,
  377. lend >> PAGE_CACHE_SHIFT);
  378. if (!err)
  379. err = err2;
  380. }
  381. }
  382. return err;
  383. }
  384. /**
  385. * add_to_page_cache - add newly allocated pagecache pages
  386. * @page: page to add
  387. * @mapping: the page's address_space
  388. * @offset: page index
  389. * @gfp_mask: page allocation mode
  390. *
  391. * This function is used to add newly allocated pagecache pages;
  392. * the page is new, so we can just run SetPageLocked() against it.
  393. * The other page state flags were set by rmqueue().
  394. *
  395. * This function does not add the page to the LRU. The caller must do that.
  396. */
  397. int add_to_page_cache(struct page *page, struct address_space *mapping,
  398. pgoff_t offset, gfp_t gfp_mask)
  399. {
  400. int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
  401. if (error == 0) {
  402. write_lock_irq(&mapping->tree_lock);
  403. error = radix_tree_insert(&mapping->page_tree, offset, page);
  404. if (!error) {
  405. page_cache_get(page);
  406. SetPageLocked(page);
  407. page->mapping = mapping;
  408. page->index = offset;
  409. mapping->nrpages++;
  410. pagecache_acct(1);
  411. }
  412. write_unlock_irq(&mapping->tree_lock);
  413. radix_tree_preload_end();
  414. }
  415. return error;
  416. }
  417. EXPORT_SYMBOL(add_to_page_cache);
  418. int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
  419. pgoff_t offset, gfp_t gfp_mask)
  420. {
  421. int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
  422. if (ret == 0)
  423. lru_cache_add(page);
  424. return ret;
  425. }
  426. #ifdef CONFIG_NUMA
  427. struct page *page_cache_alloc(struct address_space *x)
  428. {
  429. if (cpuset_do_page_mem_spread()) {
  430. int n = cpuset_mem_spread_node();
  431. return alloc_pages_node(n, mapping_gfp_mask(x), 0);
  432. }
  433. return alloc_pages(mapping_gfp_mask(x), 0);
  434. }
  435. EXPORT_SYMBOL(page_cache_alloc);
  436. struct page *page_cache_alloc_cold(struct address_space *x)
  437. {
  438. if (cpuset_do_page_mem_spread()) {
  439. int n = cpuset_mem_spread_node();
  440. return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
  441. }
  442. return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
  443. }
  444. EXPORT_SYMBOL(page_cache_alloc_cold);
  445. #endif
  446. /*
  447. * In order to wait for pages to become available there must be
  448. * waitqueues associated with pages. By using a hash table of
  449. * waitqueues where the bucket discipline is to maintain all
  450. * waiters on the same queue and wake all when any of the pages
  451. * become available, and for the woken contexts to check to be
  452. * sure the appropriate page became available, this saves space
  453. * at a cost of "thundering herd" phenomena during rare hash
  454. * collisions.
  455. */
  456. static wait_queue_head_t *page_waitqueue(struct page *page)
  457. {
  458. const struct zone *zone = page_zone(page);
  459. return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
  460. }
  461. static inline void wake_up_page(struct page *page, int bit)
  462. {
  463. __wake_up_bit(page_waitqueue(page), &page->flags, bit);
  464. }
  465. void fastcall wait_on_page_bit(struct page *page, int bit_nr)
  466. {
  467. DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
  468. if (test_bit(bit_nr, &page->flags))
  469. __wait_on_bit(page_waitqueue(page), &wait, sync_page,
  470. TASK_UNINTERRUPTIBLE);
  471. }
  472. EXPORT_SYMBOL(wait_on_page_bit);
  473. /**
  474. * unlock_page - unlock a locked page
  475. * @page: the page
  476. *
  477. * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
  478. * Also wakes sleepers in wait_on_page_writeback() because the wakeup
  479. * mechanism between PageLocked pages and PageWriteback pages is shared.
  480. * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  481. *
  482. * The first mb is necessary to safely close the critical section opened by the
  483. * TestSetPageLocked(), the second mb is necessary to enforce ordering between
  484. * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  485. * parallel wait_on_page_locked()).
  486. */
  487. void fastcall unlock_page(struct page *page)
  488. {
  489. smp_mb__before_clear_bit();
  490. if (!TestClearPageLocked(page))
  491. BUG();
  492. smp_mb__after_clear_bit();
  493. wake_up_page(page, PG_locked);
  494. }
  495. EXPORT_SYMBOL(unlock_page);
  496. /**
  497. * end_page_writeback - end writeback against a page
  498. * @page: the page
  499. */
  500. void end_page_writeback(struct page *page)
  501. {
  502. if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
  503. if (!test_clear_page_writeback(page))
  504. BUG();
  505. }
  506. smp_mb__after_clear_bit();
  507. wake_up_page(page, PG_writeback);
  508. }
  509. EXPORT_SYMBOL(end_page_writeback);
  510. /**
  511. * __lock_page - get a lock on the page, assuming we need to sleep to get it
  512. * @page: the page to lock
  513. *
  514. * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
  515. * random driver's requestfn sets TASK_RUNNING, we could busywait. However
  516. * chances are that on the second loop, the block layer's plug list is empty,
  517. * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  518. */
  519. void fastcall __lock_page(struct page *page)
  520. {
  521. DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
  522. __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
  523. TASK_UNINTERRUPTIBLE);
  524. }
  525. EXPORT_SYMBOL(__lock_page);
  526. /**
  527. * find_get_page - find and get a page reference
  528. * @mapping: the address_space to search
  529. * @offset: the page index
  530. *
  531. * A rather lightweight function, finding and getting a reference to a
  532. * hashed page atomically.
  533. */
  534. struct page * find_get_page(struct address_space *mapping, unsigned long offset)
  535. {
  536. struct page *page;
  537. read_lock_irq(&mapping->tree_lock);
  538. page = radix_tree_lookup(&mapping->page_tree, offset);
  539. if (page)
  540. page_cache_get(page);
  541. read_unlock_irq(&mapping->tree_lock);
  542. return page;
  543. }
  544. EXPORT_SYMBOL(find_get_page);
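/*
 * Illustrative sketch: the reference taken by find_get_page() must be
 * dropped with page_cache_release() once the caller is done with the page:
 *
 *	struct page *page;
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... examine the page, e.g. test PageUptodate(page) ...
 *		page_cache_release(page);
 *	}
 */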
  545. /**
  546. * find_trylock_page - find and lock a page
  547. * @mapping: the address_space to search
  548. * @offset: the page index
  549. *
  550. * Same as find_get_page(), but trylock it instead of incrementing the count.
  551. */
  552. struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
  553. {
  554. struct page *page;
  555. read_lock_irq(&mapping->tree_lock);
  556. page = radix_tree_lookup(&mapping->page_tree, offset);
  557. if (page && TestSetPageLocked(page))
  558. page = NULL;
  559. read_unlock_irq(&mapping->tree_lock);
  560. return page;
  561. }
  562. EXPORT_SYMBOL(find_trylock_page);
  563. /**
  564. * find_lock_page - locate, pin and lock a pagecache page
  565. * @mapping: the address_space to search
  566. * @offset: the page index
  567. *
  568. * Locates the desired pagecache page, locks it, increments its reference
  569. * count and returns its address.
  570. *
  571. * Returns %NULL if the page was not present. find_lock_page() may sleep.
  572. */
  573. struct page *find_lock_page(struct address_space *mapping,
  574. unsigned long offset)
  575. {
  576. struct page *page;
  577. read_lock_irq(&mapping->tree_lock);
  578. repeat:
  579. page = radix_tree_lookup(&mapping->page_tree, offset);
  580. if (page) {
  581. page_cache_get(page);
  582. if (TestSetPageLocked(page)) {
  583. read_unlock_irq(&mapping->tree_lock);
  584. __lock_page(page);
  585. read_lock_irq(&mapping->tree_lock);
  586. /* Has the page been truncated while we slept? */
  587. if (unlikely(page->mapping != mapping ||
  588. page->index != offset)) {
  589. unlock_page(page);
  590. page_cache_release(page);
  591. goto repeat;
  592. }
  593. }
  594. }
  595. read_unlock_irq(&mapping->tree_lock);
  596. return page;
  597. }
  598. EXPORT_SYMBOL(find_lock_page);
  599. /**
  600. * find_or_create_page - locate or add a pagecache page
  601. * @mapping: the page's address_space
  602. * @index: the page's index into the mapping
  603. * @gfp_mask: page allocation mode
  604. *
  605. * Locates a page in the pagecache. If the page is not present, a new page
  606. * is allocated using @gfp_mask and is added to the pagecache and to the VM's
  607. * LRU list. The returned page is locked and has its reference count
  608. * incremented.
  609. *
  610. * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
  611. * allocation!
  612. *
  613. * find_or_create_page() returns the desired page's address, or %NULL on
  614. * memory exhaustion.
  615. */
  616. struct page *find_or_create_page(struct address_space *mapping,
  617. unsigned long index, gfp_t gfp_mask)
  618. {
  619. struct page *page, *cached_page = NULL;
  620. int err;
  621. repeat:
  622. page = find_lock_page(mapping, index);
  623. if (!page) {
  624. if (!cached_page) {
  625. cached_page = alloc_page(gfp_mask);
  626. if (!cached_page)
  627. return NULL;
  628. }
  629. err = add_to_page_cache_lru(cached_page, mapping,
  630. index, gfp_mask);
  631. if (!err) {
  632. page = cached_page;
  633. cached_page = NULL;
  634. } else if (err == -EEXIST)
  635. goto repeat;
  636. }
  637. if (cached_page)
  638. page_cache_release(cached_page);
  639. return page;
  640. }
  641. EXPORT_SYMBOL(find_or_create_page);
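/*
 * Illustrative sketch: a filesystem that wants a locked, pinned page at a
 * given index (this is what the grab_cache_page() helper wraps) typically
 * brings a freshly allocated page up to date itself:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	if (!PageUptodate(page)) {
 *		... read in or zero the page contents ...
 *		SetPageUptodate(page);
 *	}
 *	... modify the page, set_page_dirty(page) if needed ...
 *	unlock_page(page);
 *	page_cache_release(page);
 */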
  642. /**
  643. * find_get_pages - gang pagecache lookup
  644. * @mapping: The address_space to search
  645. * @start: The starting page index
  646. * @nr_pages: The maximum number of pages
  647. * @pages: Where the resulting pages are placed
  648. *
  649. * find_get_pages() will search for and return a group of up to
  650. * @nr_pages pages in the mapping. The pages are placed at @pages.
  651. * find_get_pages() takes a reference against the returned pages.
  652. *
  653. * The search returns a group of mapping-contiguous pages with ascending
  654. * indexes. There may be holes in the indices due to not-present pages.
  655. *
  656. * find_get_pages() returns the number of pages which were found.
  657. */
  658. unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
  659. unsigned int nr_pages, struct page **pages)
  660. {
  661. unsigned int i;
  662. unsigned int ret;
  663. read_lock_irq(&mapping->tree_lock);
  664. ret = radix_tree_gang_lookup(&mapping->page_tree,
  665. (void **)pages, start, nr_pages);
  666. for (i = 0; i < ret; i++)
  667. page_cache_get(pages[i]);
  668. read_unlock_irq(&mapping->tree_lock);
  669. return ret;
  670. }
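/*
 * Illustrative sketch: callers normally walk a mapping in batches and must
 * drop the reference taken on every page returned (pagevec_lookup() wraps
 * this function in exactly this way).  The batch size of 16 is arbitrary:
 *
 *	struct page *batch[16];
 *	pgoff_t next = start;
 *	unsigned int i, n;
 *
 *	while ((n = find_get_pages(mapping, next, 16, batch)) != 0) {
 *		for (i = 0; i < n; i++) {
 *			next = batch[i]->index + 1;
 *			... process batch[i] ...
 *			page_cache_release(batch[i]);
 *		}
 *	}
 */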
  671. /**
  672. * find_get_pages_contig - gang contiguous pagecache lookup
  673. * @mapping: The address_space to search
  674. * @index: The starting page index
  675. * @nr_pages: The maximum number of pages
  676. * @pages: Where the resulting pages are placed
  677. *
  678. * find_get_pages_contig() works exactly like find_get_pages(), except
  679. * that the returned number of pages are guaranteed to be contiguous.
  680. *
  681. * find_get_pages_contig() returns the number of pages which were found.
  682. */
  683. unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
  684. unsigned int nr_pages, struct page **pages)
  685. {
  686. unsigned int i;
  687. unsigned int ret;
  688. read_lock_irq(&mapping->tree_lock);
  689. ret = radix_tree_gang_lookup(&mapping->page_tree,
  690. (void **)pages, index, nr_pages);
  691. for (i = 0; i < ret; i++) {
  692. if (pages[i]->mapping == NULL || pages[i]->index != index)
  693. break;
  694. page_cache_get(pages[i]);
  695. index++;
  696. }
  697. read_unlock_irq(&mapping->tree_lock);
  698. return i;
  699. }
  700. /**
  701. * find_get_pages_tag - find and return pages that match @tag
  702. * @mapping: the address_space to search
  703. * @index: the starting page index
  704. * @tag: the tag index
  705. * @nr_pages: the maximum number of pages
  706. * @pages: where the resulting pages are placed
  707. *
  708. * Like find_get_pages, except we only return pages which are tagged with
  709. * @tag. We update @index to index the next page for the traversal.
  710. */
  711. unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
  712. int tag, unsigned int nr_pages, struct page **pages)
  713. {
  714. unsigned int i;
  715. unsigned int ret;
  716. read_lock_irq(&mapping->tree_lock);
  717. ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
  718. (void **)pages, *index, nr_pages, tag);
  719. for (i = 0; i < ret; i++)
  720. page_cache_get(pages[i]);
  721. if (ret)
  722. *index = pages[ret - 1]->index + 1;
  723. read_unlock_irq(&mapping->tree_lock);
  724. return ret;
  725. }
  726. /**
  727. * grab_cache_page_nowait - returns locked page at given index in given cache
  728. * @mapping: target address_space
  729. * @index: the page index
  730. *
  731. * Same as grab_cache_page, but do not wait if the page is unavailable.
  732. * This is intended for speculative data generators, where the data can
  733. * be regenerated if the page couldn't be grabbed. This routine should
  734. * be safe to call while holding the lock for another page.
  735. *
  736. * Clear __GFP_FS when allocating the page to avoid recursion into the fs
  737. * and deadlock against the caller's locked page.
  738. */
  739. struct page *
  740. grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
  741. {
  742. struct page *page = find_get_page(mapping, index);
  743. gfp_t gfp_mask;
  744. if (page) {
  745. if (!TestSetPageLocked(page))
  746. return page;
  747. page_cache_release(page);
  748. return NULL;
  749. }
  750. gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
  751. page = alloc_pages(gfp_mask, 0);
  752. if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
  753. page_cache_release(page);
  754. page = NULL;
  755. }
  756. return page;
  757. }
  758. EXPORT_SYMBOL(grab_cache_page_nowait);
  759. /*
  760. * CD/DVDs are error prone. When a medium error occurs, the driver may fail
  761. * a _large_ part of the i/o request. Imagine the worst scenario:
  762. *
  763. * ---R__________________________________________B__________
  764. *    ^ reading here                              ^ bad block (assume 4k)
  765. *
  766. * read(R) => miss => readahead(R...B) => media error => frustrating retries
  767. * => failing the whole request => read(R) => read(R+1) =>
  768. * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
  769. * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
  770. * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
  771. *
  772. * It is going insane. Fix it by quickly scaling down the readahead size.
  773. */
  774. static void shrink_readahead_size_eio(struct file *filp,
  775. struct file_ra_state *ra)
  776. {
  777. if (!ra->ra_pages)
  778. return;
  779. ra->ra_pages /= 4;
  780. printk(KERN_WARNING "Reducing readahead size to %luK\n",
  781. ra->ra_pages << (PAGE_CACHE_SHIFT - 10));
  782. }
  783. /**
  784. * do_generic_mapping_read - generic file read routine
  785. * @mapping: address_space to be read
  786. * @_ra: file's readahead state
  787. * @filp: the file to read
  788. * @ppos: current file position
  789. * @desc: read_descriptor
  790. * @actor: read method
  791. *
  792. * This is a generic file read routine, and uses the
  793. * mapping->a_ops->readpage() function for the actual low-level stuff.
  794. *
  795. * This is really ugly. But the goto's actually try to clarify some
  796. * of the logic when it comes to error handling etc.
  797. *
  798. * Note the struct file* is only passed for the use of readpage.
  799. * It may be NULL.
  800. */
  801. void do_generic_mapping_read(struct address_space *mapping,
  802. struct file_ra_state *_ra,
  803. struct file *filp,
  804. loff_t *ppos,
  805. read_descriptor_t *desc,
  806. read_actor_t actor)
  807. {
  808. struct inode *inode = mapping->host;
  809. unsigned long index;
  810. unsigned long end_index;
  811. unsigned long offset;
  812. unsigned long last_index;
  813. unsigned long next_index;
  814. unsigned long prev_index;
  815. loff_t isize;
  816. struct page *cached_page;
  817. int error;
  818. struct file_ra_state ra = *_ra;
  819. cached_page = NULL;
  820. index = *ppos >> PAGE_CACHE_SHIFT;
  821. next_index = index;
  822. prev_index = ra.prev_page;
  823. last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
  824. offset = *ppos & ~PAGE_CACHE_MASK;
  825. isize = i_size_read(inode);
  826. if (!isize)
  827. goto out;
  828. end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
  829. for (;;) {
  830. struct page *page;
  831. unsigned long nr, ret;
  832. /* nr is the maximum number of bytes to copy from this page */
  833. nr = PAGE_CACHE_SIZE;
  834. if (index >= end_index) {
  835. if (index > end_index)
  836. goto out;
  837. nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
  838. if (nr <= offset) {
  839. goto out;
  840. }
  841. }
  842. nr = nr - offset;
  843. cond_resched();
  844. if (index == next_index)
  845. next_index = page_cache_readahead(mapping, &ra, filp,
  846. index, last_index - index);
  847. find_page:
  848. page = find_get_page(mapping, index);
  849. if (unlikely(page == NULL)) {
  850. handle_ra_miss(mapping, &ra, index);
  851. goto no_cached_page;
  852. }
  853. if (!PageUptodate(page))
  854. goto page_not_up_to_date;
  855. page_ok:
  856. /* If users can be writing to this page using arbitrary
  857. * virtual addresses, take care about potential aliasing
  858. * before reading the page on the kernel side.
  859. */
  860. if (mapping_writably_mapped(mapping))
  861. flush_dcache_page(page);
  862. /*
  863. * When (part of) the same page is read multiple times
  864. * in succession, only mark it as accessed the first time.
  865. */
  866. if (prev_index != index)
  867. mark_page_accessed(page);
  868. prev_index = index;
  869. /*
  870. * Ok, we have the page, and it's up-to-date, so
  871. * now we can copy it to user space...
  872. *
  873. * The actor routine returns how many bytes were actually used..
  874. * NOTE! This may not be the same as how much of a user buffer
  875. * we filled up (we may be padding etc), so we can only update
  876. * "pos" here (the actor routine has to update the user buffer
  877. * pointers and the remaining count).
  878. */
  879. ret = actor(desc, page, offset, nr);
  880. offset += ret;
  881. index += offset >> PAGE_CACHE_SHIFT;
  882. offset &= ~PAGE_CACHE_MASK;
  883. page_cache_release(page);
  884. if (ret == nr && desc->count)
  885. continue;
  886. goto out;
  887. page_not_up_to_date:
  888. /* Get exclusive access to the page ... */
  889. lock_page(page);
  890. /* Did it get unhashed before we got the lock? */
  891. if (!page->mapping) {
  892. unlock_page(page);
  893. page_cache_release(page);
  894. continue;
  895. }
  896. /* Did somebody else fill it already? */
  897. if (PageUptodate(page)) {
  898. unlock_page(page);
  899. goto page_ok;
  900. }
  901. readpage:
  902. /* Start the actual read. The read will unlock the page. */
  903. error = mapping->a_ops->readpage(filp, page);
  904. if (unlikely(error)) {
  905. if (error == AOP_TRUNCATED_PAGE) {
  906. page_cache_release(page);
  907. goto find_page;
  908. }
  909. goto readpage_error;
  910. }
  911. if (!PageUptodate(page)) {
  912. lock_page(page);
  913. if (!PageUptodate(page)) {
  914. if (page->mapping == NULL) {
  915. /*
  916. * invalidate_inode_pages got it
  917. */
  918. unlock_page(page);
  919. page_cache_release(page);
  920. goto find_page;
  921. }
  922. unlock_page(page);
  923. error = -EIO;
  924. shrink_readahead_size_eio(filp, &ra);
  925. goto readpage_error;
  926. }
  927. unlock_page(page);
  928. }
  929. /*
  930. * i_size must be checked after we have done ->readpage.
  931. *
  932. * Checking i_size after the readpage allows us to calculate
  933. * the correct value for "nr", which means the zero-filled
  934. * part of the page is not copied back to userspace (unless
  935. * another truncate extends the file - this is desired though).
  936. */
  937. isize = i_size_read(inode);
  938. end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
  939. if (unlikely(!isize || index > end_index)) {
  940. page_cache_release(page);
  941. goto out;
  942. }
  943. /* nr is the maximum number of bytes to copy from this page */
  944. nr = PAGE_CACHE_SIZE;
  945. if (index == end_index) {
  946. nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
  947. if (nr <= offset) {
  948. page_cache_release(page);
  949. goto out;
  950. }
  951. }
  952. nr = nr - offset;
  953. goto page_ok;
  954. readpage_error:
  955. /* UHHUH! A synchronous read error occurred. Report it */
  956. desc->error = error;
  957. page_cache_release(page);
  958. goto out;
  959. no_cached_page:
  960. /*
  961. * Ok, it wasn't cached, so we need to create a new
  962. * page..
  963. */
  964. if (!cached_page) {
  965. cached_page = page_cache_alloc_cold(mapping);
  966. if (!cached_page) {
  967. desc->error = -ENOMEM;
  968. goto out;
  969. }
  970. }
  971. error = add_to_page_cache_lru(cached_page, mapping,
  972. index, GFP_KERNEL);
  973. if (error) {
  974. if (error == -EEXIST)
  975. goto find_page;
  976. desc->error = error;
  977. goto out;
  978. }
  979. page = cached_page;
  980. cached_page = NULL;
  981. goto readpage;
  982. }
  983. out:
  984. *_ra = ra;
  985. *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
  986. if (cached_page)
  987. page_cache_release(cached_page);
  988. if (filp)
  989. file_accessed(filp);
  990. }
  991. EXPORT_SYMBOL(do_generic_mapping_read);
  992. int file_read_actor(read_descriptor_t *desc, struct page *page,
  993. unsigned long offset, unsigned long size)
  994. {
  995. char *kaddr;
  996. unsigned long left, count = desc->count;
  997. if (size > count)
  998. size = count;
  999. /*
  1000. * Faults on the destination of a read are common, so do it before
  1001. * taking the kmap.
  1002. */
  1003. if (!fault_in_pages_writeable(desc->arg.buf, size)) {
  1004. kaddr = kmap_atomic(page, KM_USER0);
  1005. left = __copy_to_user_inatomic(desc->arg.buf,
  1006. kaddr + offset, size);
  1007. kunmap_atomic(kaddr, KM_USER0);
  1008. if (left == 0)
  1009. goto success;
  1010. }
  1011. /* Do it the slow way */
  1012. kaddr = kmap(page);
  1013. left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
  1014. kunmap(page);
  1015. if (left) {
  1016. size -= left;
  1017. desc->error = -EFAULT;
  1018. }
  1019. success:
  1020. desc->count = count - size;
  1021. desc->written += size;
  1022. desc->arg.buf += size;
  1023. return size;
  1024. }
  1025. /**
  1026. * __generic_file_aio_read - generic filesystem read routine
  1027. * @iocb: kernel I/O control block
  1028. * @iov: io vector request
  1029. * @nr_segs: number of segments in the iovec
  1030. * @ppos: current file position
  1031. *
  1032. * This is the "read()" routine for all filesystems
  1033. * that can use the page cache directly.
  1034. */
  1035. ssize_t
  1036. __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
  1037. unsigned long nr_segs, loff_t *ppos)
  1038. {
  1039. struct file *filp = iocb->ki_filp;
  1040. ssize_t retval;
  1041. unsigned long seg;
  1042. size_t count;
  1043. count = 0;
  1044. for (seg = 0; seg < nr_segs; seg++) {
  1045. const struct iovec *iv = &iov[seg];
  1046. /*
  1047. * If any segment has a negative length, or the cumulative
  1048. * length ever wraps negative then return -EINVAL.
  1049. */
  1050. count += iv->iov_len;
  1051. if (unlikely((ssize_t)(count|iv->iov_len) < 0))
  1052. return -EINVAL;
  1053. if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
  1054. continue;
  1055. if (seg == 0)
  1056. return -EFAULT;
  1057. nr_segs = seg;
  1058. count -= iv->iov_len; /* This segment is no good */
  1059. break;
  1060. }
  1061. /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
  1062. if (filp->f_flags & O_DIRECT) {
  1063. loff_t pos = *ppos, size;
  1064. struct address_space *mapping;
  1065. struct inode *inode;
  1066. mapping = filp->f_mapping;
  1067. inode = mapping->host;
  1068. retval = 0;
  1069. if (!count)
  1070. goto out; /* skip atime */
  1071. size = i_size_read(inode);
  1072. if (pos < size) {
  1073. retval = generic_file_direct_IO(READ, iocb,
  1074. iov, pos, nr_segs);
  1075. if (retval > 0 && !is_sync_kiocb(iocb))
  1076. retval = -EIOCBQUEUED;
  1077. if (retval > 0)
  1078. *ppos = pos + retval;
  1079. }
  1080. file_accessed(filp);
  1081. goto out;
  1082. }
  1083. retval = 0;
  1084. if (count) {
  1085. for (seg = 0; seg < nr_segs; seg++) {
  1086. read_descriptor_t desc;
  1087. desc.written = 0;
  1088. desc.arg.buf = iov[seg].iov_base;
  1089. desc.count = iov[seg].iov_len;
  1090. if (desc.count == 0)
  1091. continue;
  1092. desc.error = 0;
  1093. do_generic_file_read(filp,ppos,&desc,file_read_actor);
  1094. retval += desc.written;
  1095. if (desc.error) {
  1096. retval = retval ?: desc.error;
  1097. break;
  1098. }
  1099. }
  1100. }
  1101. out:
  1102. return retval;
  1103. }
  1104. EXPORT_SYMBOL(__generic_file_aio_read);
  1105. ssize_t
  1106. generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
  1107. {
  1108. struct iovec local_iov = { .iov_base = buf, .iov_len = count };
  1109. BUG_ON(iocb->ki_pos != pos);
  1110. return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
  1111. }
  1112. EXPORT_SYMBOL(generic_file_aio_read);
  1113. ssize_t
  1114. generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
  1115. {
  1116. struct iovec local_iov = { .iov_base = buf, .iov_len = count };
  1117. struct kiocb kiocb;
  1118. ssize_t ret;
  1119. init_sync_kiocb(&kiocb, filp);
  1120. ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
  1121. if (-EIOCBQUEUED == ret)
  1122. ret = wait_on_sync_kiocb(&kiocb);
  1123. return ret;
  1124. }
  1125. EXPORT_SYMBOL(generic_file_read);
  1126. int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
  1127. {
  1128. ssize_t written;
  1129. unsigned long count = desc->count;
  1130. struct file *file = desc->arg.data;
  1131. if (size > count)
  1132. size = count;
  1133. written = file->f_op->sendpage(file, page, offset,
  1134. size, &file->f_pos, size<count);
  1135. if (written < 0) {
  1136. desc->error = written;
  1137. written = 0;
  1138. }
  1139. desc->count = count - written;
  1140. desc->written += written;
  1141. return written;
  1142. }
  1143. ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
  1144. size_t count, read_actor_t actor, void *target)
  1145. {
  1146. read_descriptor_t desc;
  1147. if (!count)
  1148. return 0;
  1149. desc.written = 0;
  1150. desc.count = count;
  1151. desc.arg.data = target;
  1152. desc.error = 0;
  1153. do_generic_file_read(in_file, ppos, &desc, actor);
  1154. if (desc.written)
  1155. return desc.written;
  1156. return desc.error;
  1157. }
  1158. EXPORT_SYMBOL(generic_file_sendfile);
  1159. static ssize_t
  1160. do_readahead(struct address_space *mapping, struct file *filp,
  1161. unsigned long index, unsigned long nr)
  1162. {
  1163. if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
  1164. return -EINVAL;
  1165. force_page_cache_readahead(mapping, filp, index,
  1166. max_sane_readahead(nr));
  1167. return 0;
  1168. }
  1169. asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  1170. {
  1171. ssize_t ret;
  1172. struct file *file;
  1173. ret = -EBADF;
  1174. file = fget(fd);
  1175. if (file) {
  1176. if (file->f_mode & FMODE_READ) {
  1177. struct address_space *mapping = file->f_mapping;
  1178. unsigned long start = offset >> PAGE_CACHE_SHIFT;
  1179. unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
  1180. unsigned long len = end - start + 1;
  1181. ret = do_readahead(mapping, file, start, len);
  1182. }
  1183. fput(file);
  1184. }
  1185. return ret;
  1186. }
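/*
 * Illustrative sketch: userspace reaches this syscall through the
 * readahead(2) wrapper (glibc, _GNU_SOURCE); the file name below is only an
 * example:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	if (fd >= 0)
 *		readahead(fd, 0, 1 << 20);	... prime the first 1MB ...
 */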
  1187. #ifdef CONFIG_MMU
  1188. static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
  1189. /**
  1190. * page_cache_read - adds requested page to the page cache if not already there
  1191. * @file: file to read
  1192. * @offset: page index
  1193. *
  1194. * This adds the requested page to the page cache if it isn't already there,
  1195. * and schedules an I/O to read in its contents from disk.
  1196. */
  1197. static int fastcall page_cache_read(struct file * file, unsigned long offset)
  1198. {
  1199. struct address_space *mapping = file->f_mapping;
  1200. struct page *page;
  1201. int ret;
  1202. do {
  1203. page = page_cache_alloc_cold(mapping);
  1204. if (!page)
  1205. return -ENOMEM;
  1206. ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
  1207. if (ret == 0)
  1208. ret = mapping->a_ops->readpage(file, page);
  1209. else if (ret == -EEXIST)
  1210. ret = 0; /* losing race to add is OK */
  1211. page_cache_release(page);
  1212. } while (ret == AOP_TRUNCATED_PAGE);
  1213. return ret;
  1214. }
  1215. #define MMAP_LOTSAMISS (100)
  1216. /**
  1217. * filemap_nopage - read in file data for page fault handling
  1218. * @area: the applicable vm_area
  1219. * @address: target address to read in
  1220. * @type: returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
  1221. *
  1222. * filemap_nopage() is invoked via the vma operations vector for a
  1223. * mapped memory region to read in file data during a page fault.
  1224. *
  1225. * The goto's are kind of ugly, but this streamlines the normal case of having
  1226. * it in the page cache, and handles the special cases reasonably without
  1227. * having a lot of duplicated code.
  1228. */
  1229. struct page *filemap_nopage(struct vm_area_struct *area,
  1230. unsigned long address, int *type)
  1231. {
  1232. int error;
  1233. struct file *file = area->vm_file;
  1234. struct address_space *mapping = file->f_mapping;
  1235. struct file_ra_state *ra = &file->f_ra;
  1236. struct inode *inode = mapping->host;
  1237. struct page *page;
	unsigned long size, pgoff;
	int did_readaround = 0, majmin = VM_FAULT_MINOR;

	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;

retry_all:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		goto outside_data_content;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(area))
		goto no_cached_page;

	/*
	 * The readahead code wants to be told about each and every page
	 * so it can build and shrink its windows appropriately
	 *
	 * For sequential accesses, we use the generic readahead logic.
	 */
	if (VM_SequentialReadHint(area))
		page_cache_readahead(mapping, ra, file, pgoff, 1);

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		unsigned long ra_pages;

		if (VM_SequentialReadHint(area)) {
			handle_ra_miss(mapping, ra, pgoff);
			goto no_cached_page;
		}
		ra->mmap_miss++;

		/*
		 * Do we miss much more than hit in this file? If so,
		 * stop bothering with read-ahead. It will only hurt.
		 */
		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
			goto no_cached_page;

		/*
		 * To keep the pgmajfault counter straight, we need to
		 * check did_readaround, as this is an inner loop.
		 */
		if (!did_readaround) {
			majmin = VM_FAULT_MAJOR;
			inc_page_state(pgmajfault);
		}
		did_readaround = 1;
		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
		if (ra_pages) {
			pgoff_t start = 0;

			if (pgoff > ra_pages / 2)
				start = pgoff - ra_pages / 2;
			do_page_cache_readahead(mapping, file, start, ra_pages);
		}
		page = find_get_page(mapping, pgoff);
		if (!page)
			goto no_cached_page;
	}

	if (!did_readaround)
		ra->mmap_hit++;

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page))
		goto page_not_uptodate;

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	if (type)
		*type = majmin;
	return page;

outside_data_content:
	/*
	 * An external ptracer can access pages that normally aren't
	 * accessible..
	 */
	if (area->vm_mm == current->mm)
		return NULL;
	/* Fall through to the non-read-ahead case */

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, pgoff);
	grab_swap_token();

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return NOPAGE_OOM;
	return NULL;

page_not_uptodate:
	if (!did_readaround) {
		majmin = VM_FAULT_MAJOR;
		inc_page_state(pgmajfault);
	}
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
	shrink_readahead_size_eio(file, ra);
	page_cache_release(page);
	return NULL;
}
EXPORT_SYMBOL(filemap_nopage);
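
/*
 * filemap_getpage - non-faulting page cache lookup used by filemap_populate().
 * Returns the up-to-date page at @pgoff with a reference held, or NULL.
 * With @nonblock set it never sleeps; otherwise a missing or stale page is
 * first read in via page_cache_read()/->readpage().
 */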
static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
					int nonblock)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int error;

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		if (nonblock)
			return NULL;
		goto no_cached_page;
	}

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page)) {
		if (nonblock) {
			page_cache_release(page);
			return NULL;
		}
		goto page_not_uptodate;
	}

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	return page;

no_cached_page:
	error = page_cache_read(file, pgoff);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	return NULL;

page_not_uptodate:
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}

	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
err:
	page_cache_release(page);
	return NULL;
}
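
/*
 * filemap_populate - ->populate handler for generic file mappings.
 * Instantiates PTEs for [addr, addr + len) from the page cache; on a
 * VM_NONLINEAR vma with nonblock set, pages that cannot be read in now get
 * a file PTE installed instead so they can be faulted in later.
 */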
int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
		unsigned long len, pgprot_t prot, unsigned long pgoff,
		int nonblock)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long size;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int err;

	if (!nonblock)
		force_page_cache_readahead(mapping, vma->vm_file,
					pgoff, len >> PAGE_CACHE_SHIFT);

repeat:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
		return -EINVAL;

	page = filemap_getpage(file, pgoff, nonblock);

	/* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
	 * done in shmem_populate calling shmem_getpage */
	if (!page && !nonblock)
		return -ENOMEM;

	if (page) {
		err = install_page(mm, vma, addr, page, prot);
		if (err) {
			page_cache_release(page);
			return err;
		}
	} else if (vma->vm_flags & VM_NONLINEAR) {
		/* No page was found just because we can't read it in now (being
		 * here implies nonblock != 0), but the page may exist, so set
		 * the PTE to fault it in later. */
		err = install_file_pte(mm, vma, addr, pgoff, prot);
		if (err)
			return err;
	}

	len -= PAGE_SIZE;
	addr += PAGE_SIZE;
	pgoff++;
	if (len)
		goto repeat;

	return 0;
}
EXPORT_SYMBOL(filemap_populate);
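
/* Shared vm_operations for generic file mmaps; wired up by generic_file_mmap() */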
struct vm_operations_struct generic_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
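
/*
 * __read_cache_page - find or create the page at @index, running @filler on
 * a newly allocated page.  Unlike read_cache_page() it does not re-fill a
 * page that is already present but not yet up to date.
 */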
static inline struct page *__read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page)
				return ERR_PTR(-ENOMEM);
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err < 0) {
			/* Presumably ENOMEM for radix tree node */
			page_cache_release(cached_page);
			return ERR_PTR(err);
		}
		page = cached_page;
		cached_page = NULL;
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	destination for read data
 *
 * Read into the page cache. If a page already exists,
 * and PageUptodate() is not set, try to fill the page.
 */
struct page *read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data);
	if (IS_ERR(page))
		goto out;
	mark_page_accessed(page);
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		page = ERR_PTR(err);
	}
out:
	return page;
}
EXPORT_SYMBOL(read_cache_page);

/*
 * If the page was newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec.  This function is specifically for
 * generic_file_write().
 */
static inline struct page *
__grab_cache_page(struct address_space *mapping, unsigned long index,
			struct page **cached_page, struct pagevec *lru_pvec)
{
	int err;
	struct page *page;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!*cached_page) {
			*cached_page = page_cache_alloc(mapping);
			if (!*cached_page)
				return NULL;
		}
		err = add_to_page_cache(*cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err == 0) {
			page = *cached_page;
			page_cache_get(page);
			if (!pagevec_add(lru_pvec, page))
				__pagevec_lru_add(lru_pvec);
			*cached_page = NULL;
		}
	}
	return page;
}

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int remove_suid(struct dentry *dentry)
{
	mode_t mode = dentry->d_inode->i_mode;
	int kill = 0;
	int result = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID))) {
		struct iattr newattrs;

		newattrs.ia_valid = ATTR_FORCE | kill;
		result = notify_change(dentry, &newattrs);
	}
	return result;
}
EXPORT_SYMBOL(remove_suid);
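
/*
 * Copy up to @bytes from the iovec array (starting @base bytes into the first
 * segment) to @vaddr without sleeping.  Returns the number of bytes actually
 * copied; a short return means a segment faulted part-way through.
 */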
size_t
__filemap_copy_from_user_iovec_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus frestrict idea will clean these up nicely..
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
		loff_t isize;

		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);
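
/*
 * generic_file_direct_write - O_DIRECT write helper.  Issues the direct I/O,
 * extends i_size for regular files when the write grows the file, updates
 * *ppos, syncs metadata for O_SYNC files and synchronous inodes, and converts
 * a full-count result into -EIOCBQUEUED for asynchronous kiocbs.
 */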
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
	if (written > 0) {
		loff_t end = pos + written;

		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, end);
			mark_inode_dirty(inode);
		}
		*ppos = end;
	}

	/*
	 * Sync the fs metadata but not the minor inode changes and
	 * of course not the data as we did direct DMA for the IO.
	 * i_mutex is held, which protects generic_osync_inode() from
	 * livelocking.
	 */
	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		if (err < 0)
			written = err;
	}
	if (written == count && !is_sync_kiocb(iocb))
		written = -EIOCBQUEUED;
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
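
/*
 * generic_file_buffered_write - write through the page cache.  For each page:
 * fault in the source data, grab and lock the pagecache page, ->prepare_write(),
 * copy from userspace, ->commit_write(), then balance dirty pages.  Handles
 * O_SYNC/IS_SYNC via generic_osync_inode() and syncs data when falling back
 * from a partial O_DIRECT write.
 */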
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	struct page *cached_page = NULL;
	size_t bytes;
	struct pagevec lru_pvec;
	const struct iovec *cur_iov = iov; /* current iovec */
	size_t iov_base = 0;	   /* offset in the current iovec */
	char __user *buf;

	pagevec_init(&lru_pvec, 0);

	/*
	 * handle partial DIO write.  Adjust cur_iov if needed.
	 */
	if (likely(nr_segs == 1))
		buf = iov->iov_base + written;
	else {
		filemap_set_next_iovec(&cur_iov, &iov_base, written);
		buf = cur_iov->iov_base + iov_base;
	}

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;

		/* Limit the size of the copy to the caller's write size */
		bytes = min(bytes, count);

		/*
		 * Limit the size of the copy to that of the current segment,
		 * because fault_in_pages_readable() doesn't know how to walk
		 * segments.
		 */
		bytes = min(bytes, cur_iov->iov_len - iov_base);

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		fault_in_pages_readable(buf, bytes);

		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
		if (!page) {
			status = -ENOMEM;
			break;
		}

		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status)) {
			loff_t isize = i_size_read(inode);

			if (status != AOP_TRUNCATED_PAGE)
				unlock_page(page);
			page_cache_release(page);
			if (status == AOP_TRUNCATED_PAGE)
				continue;
			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again.
			 */
			if (pos + bytes > isize)
				vmtruncate(inode, isize);
			break;
		}
		if (likely(nr_segs == 1))
			copied = filemap_copy_from_user(page, offset,
							buf, bytes);
		else
			copied = filemap_copy_from_user_iovec(page, offset,
						cur_iov, iov_base, bytes);
		flush_dcache_page(page);
		status = a_ops->commit_write(file, page, offset, offset+bytes);
		if (status == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			continue;
		}
		if (likely(copied > 0)) {
			if (!status)
				status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
				if (unlikely(nr_segs > 1)) {
					filemap_set_next_iovec(&cur_iov,
							&iov_base, status);
					if (count)
						buf = cur_iov->iov_base +
							iov_base;
				} else {
					iov_base += status;
				}
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
		if (status < 0)
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
	*ppos = pos;

	if (cached_page)
		page_cache_release(cached_page);

	/*
	 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
	 */
	if (likely(status >= 0)) {
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
			if (!a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(inode, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}

	/*
	 * If we get here for O_DIRECT writes then we must have fallen through
	 * to buffered writes (block instantiation inside i_size).  So we sync
	 * the file data here, to try to honour O_DIRECT expectations.
	 */
	if (unlikely(file->f_flags & O_DIRECT) && written)
		status = filemap_write_and_wait(mapping);

	pagevec_lru_add(&lru_pvec);
	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
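
/*
 * Core of the generic write path: validate the iovec, apply
 * generic_write_checks(), strip setuid/setgid bits, update the file times,
 * then do the O_DIRECT and/or buffered write.  Locking is the caller's
 * responsibility (generic_file_aio_write() takes i_mutex before calling
 * this), as is any O_SYNC post-write syncing of the written range.
 */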
static ssize_t
__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space * mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode *inode = mapping->host;
	unsigned long seg;
	loff_t pos;
	ssize_t written;
	ssize_t err;

	ocount = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *ppos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	file_update_time(file);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		written = generic_file_direct_write(iocb, iov,
				&nr_segs, pos, ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
	}

	written = generic_file_buffered_write(iocb, iov, nr_segs,
			pos, ppos, count, written);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(generic_file_aio_write_nolock);
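
/*
 * As __generic_file_aio_write_nolock(), plus sync_page_range_nolock() of the
 * written range for O_SYNC files and synchronous inodes.  The caller provides
 * the serialisation.
 */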
ssize_t
generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	loff_t pos = *ppos;

	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err;

		err = sync_page_range_nolock(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

static ssize_t
__generic_file_write_nolock(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

ssize_t
generic_file_write_nolock(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_nolock);
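
/*
 * generic_file_aio_write - generic aio write entry point.  Takes i_mutex
 * around the write and then syncs the written range for O_SYNC files and
 * synchronous inodes.
 */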
ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
			       size_t count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
					.iov_len = count };

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
						&iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);

ssize_t generic_file_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
					.iov_len = count };

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write);
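
/*
 * Synchronous readv/writev entry points: readv wraps the aio read path in a
 * sync kiocb; writev takes i_mutex and syncs the written range the same way
 * generic_file_write() does.
 */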
ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
			unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_readv);

ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
			unsigned long nr_segs, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err;

		err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_writev);

/*
 * Called under i_mutex for writes to S_ISREG files.  Returns -EIO if something
 * went wrong during pagecache shootdown.
 */
static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t retval;
	size_t write_len = 0;

	/*
	 * If it's a write, unmap all mmappings of the file up-front.  This
	 * will cause any pte dirty bits to be propagated into the pageframes
	 * for the subsequent filemap_write_and_wait().
	 */
	if (rw == WRITE) {
		write_len = iov_length(iov, nr_segs);
		if (mapping_mapped(mapping))
			unmap_mapping_range(mapping, offset, write_len, 0);
	}

	retval = filemap_write_and_wait(mapping);
	if (retval == 0) {
		retval = mapping->a_ops->direct_IO(rw, iocb, iov,
						offset, nr_segs);
		if (rw == WRITE && mapping->nrpages) {
			pgoff_t end = (offset + write_len - 1)
						>> PAGE_CACHE_SHIFT;
			int err = invalidate_inode_pages2_range(mapping,
					offset >> PAGE_CACHE_SHIFT, end);
			if (err)
				retval = err;
		}
	}
	return retval;
}