/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
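
/*
 * Example (illustrative values): records sort by root objectid first and
 * inode number second, so (root 5, ino 257) < (root 5, ino 300) <
 * (root 256, ino 1).  This ordering is what lets btrfs_run_defrag_inodes()
 * walk the tree and resume a scan from any (root_objectid, first_ino) pair.
 */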

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				     struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	kfree(defrag);
	return;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
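	/*
	 * IN_DEFRAG was tested once above without the lock as a cheap early
	 * out; re-test it under defrag_inodes_lock so two racing writers
	 * cannot both insert -- the loser simply frees its allocation.
	 */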
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kfree(defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
					     u64 root, u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			return entry;
	}

	if (next) {
		while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}
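
/*
 * Example (illustrative): with records for (root 5, ino 257) and
 * (root 5, ino 400) in the tree, a lookup for (root 5, ino 300) finds no
 * exact match, but *next is set to the (root 5, ino 400) node, so a scan
 * that just finished ino 257 can pick up the next queued inode without
 * restarting from the beginning of the tree.
 */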

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	u64 root_objectid = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while (1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
						 first_ino, &n);
		if (!defrag) {
			if (n) {
				defrag = rb_entry(n, struct inode_defrag,
						  rb_node);
			} else if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
					       defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		kfree(defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);
	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
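
/*
 * Example (illustrative): with defrag_batch == 1024, each inode gets at
 * most one batch of defrag work per pass before it is re-queued with
 * last_offset advanced, so a single huge fragmented file cannot starve
 * the other inodes waiting in the tree.
 */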

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
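
/*
 * Example (illustrative, 4K pages): a write of 6000 bytes at pos 5000
 * starts at offset 5000 & 4095 == 904 inside the first prepared page, so
 * the first atomic copy moves min(4096 - 904, 6000) == 3192 bytes; the
 * next iteration continues at offset 0 of the second page.
 */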

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
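
/*
 * Example (illustrative, 4K sectors): a 100 byte write at pos 5000 rounds
 * out to the containing sector, giving start_pos == 4096 and
 * num_bytes == 4096, so delalloc is set on [4096, 8191] even though only
 * bytes [5000, 5099] actually changed.
 */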

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		BUG_ON(!split || !split2); /* -ENOMEM */

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;
			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
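
/*
 * Example (illustrative): dropping [4096, 6143] from a cached mapping that
 * covers [0, 8191] removes the original extent_map and inserts two pieces,
 * [0, 4095] from the first split and [6144, 8191] from the second, which
 * is why two pre-allocated split structures are needed per loop iteration.
 */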

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	if (drop_end)
		*drop_end = min(end, extent_end);
	btrfs_release_path(path);
	return ret;
}

int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache);
	btrfs_free_path(path);
	return ret;
}
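
/*
 * Example (illustrative): overwriting bytes [4096, 8191] of a file backed
 * by a single on-disk extent [0, 16383] goes through
 * btrfs_drop_extents(trans, root, inode, 4096, 8192, 1) first; the item is
 * split so that [0, 4095] and [8192, 16383] survive (both still pointing
 * at the original disk bytes via extent_offset) while the dropped middle
 * is free to be replaced by the new write.
 */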

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
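
/*
 * Example (illustrative): marking bytes [4096, 8191] written in the middle
 * of a preallocated extent [0, 16383] runs the split loop twice, leaving
 * three items: [0, 4095] and [8192, 16383] still PREALLOC, and
 * [4096, 8191] flipped to BTRFS_FILE_EXTENT_REG, with the shared disk
 * extent's reference count bumped once per split.
 */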

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
				  struct page **pages, size_t num_pages,
				  loff_t pos, unsigned long first_index,
				  size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
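
/*
 * Note that only the first and last page of the range ever need to be read
 * in above: interior pages are completely overwritten by the copy, while a
 * write that starts or ends partway into a page must not clobber the bytes
 * of that page it does not cover.
 */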

static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
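
/*
 * Example (illustrative, 4K pages, 64 bit): the pages array is capped at
 * PAGE_CACHE_SIZE / sizeof(struct page *) == 4096 / 8 == 512 pointers, so
 * one loop iteration handles at most 2MB of a large write, while a 100
 * byte write needs only one page and one iteration.
 */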

static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
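
/*
 * A short or failed O_DIRECT write falls back to the buffered path above
 * for the remainder, then writes that range out and drops it from the
 * page cache so the cached and on-disk views of the file stay coherent
 * for later direct I/O.
 */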

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	sb_start_write(inode->i_sb);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	err = file_update_time(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	sb_end_write(inode->i_sb);
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	mutex_lock(&inode->i_mutex);

	/*
	 * We wait first, since the writeback may change the inode; also,
	 * btrfs_wait_ordered_range does a filemap_write_and_wait_range,
	 * which is why we don't do it above like other file systems.
	 */
	root->log_batch++;
	btrfs_wait_ordered_range(inode, start, end);
	root->log_batch++;

	/*
	 * Check the transaction that last modified this inode
	 * and see if it's already been committed.
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing.
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		/*
		 * We've had everything committed since the last time we were
		 * modified, so clear this flag in case it was set for whatever
		 * reason; it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * Ok, we haven't committed the transaction yet, let's do a commit.
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * We've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents.
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}
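
/*
 * Rough summary of the endgame above (editor's gloss, not authoritative):
 *
 *	ret == BTRFS_NO_LOG_SYNC -> nothing to log-sync, just end the handle
 *	ret > 0                  -> the inode couldn't use the tree log, so
 *	                            fall back to a full transaction commit
 *	ret == 0                 -> btrfs_sync_log(); only if that fails do
 *	                            we pay for a full commit
 */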
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;

	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;

	return 0;
}
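
/*
 * Editor's gloss on hole_mergeable(): the item at @slot must describe an
 * existing hole (a REG extent with disk_bytenr == 0) that abuts the new
 * hole [start, end).  Example with 4096 byte holes: punching [4096, 8192)
 * merges with a hole item at file offset 0 (key.offset + num_bytes == start)
 * or with one at file offset 8192 (key.offset == end).
 */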
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0] + 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]++;
		key.offset = offset;
		btrfs_set_item_key_safe(trans, root, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, end - offset, 0, end - offset,
				       0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em);
			if (!ret)
				list_move(&hole_em->list,
					  &em_tree->modified_extents);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	}

	return 0;
}
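
/*
 * Editor's note on the -EEXIST loop above: add_extent_mapping() fails with
 * -EEXIST while any cached extent map still overlaps [offset, end - 1], and
 * a concurrent reader can repopulate the cache between the drop and the
 * insert, so the drop-and-add is simply retried until the hole em sticks.
 * If insertion fails for any other reason (or no em could be allocated),
 * the inode is flagged for a full sync instead.
 */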
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	u64 lockstart = (offset + mask) & ~mask;
	u64 lockend = ((offset + len) & ~mask) - 1;
	u64 cur_offset = lockstart;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	u64 drop_end;
	unsigned long nr;
	int ret = 0;
	int err = 0;
	bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
		((offset + len) >> PAGE_CACHE_SHIFT);

	btrfs_wait_ordered_range(inode, offset, len);

	mutex_lock(&inode->i_mutex);
	if (offset >= inode->i_size) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	/*
	 * Only do this if we are in the same page and we aren't doing the
	 * entire page.
	 */
	if (same_page && len < PAGE_CACHE_SIZE) {
		ret = btrfs_truncate_page(inode, offset, len, 0);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero back part of the first page */
	ret = btrfs_truncate_page(inode, offset, 0, 0);
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero the front end of the last page */
	ret = btrfs_truncate_page(inode, offset + len, 0, 1);
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	if (lockend < lockstart) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}
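
	/*
	 * Editor's worked example of the alignment above, assuming a 4096
	 * byte sectorsize: punching offset = 1000, len = 10000 gives
	 * lockstart = 4096 (offset rounded up) and lockend = 8191 (the end,
	 * 11000, rounded down, minus one).  Only the whole sectors in
	 * between are dropped; the partial sectors at either end were
	 * zeroed by btrfs_truncate_page().  A punch covering no whole
	 * sector makes lockend < lockstart and returns early just above.
	 */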
	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this
		 * range, and that nobody raced in and read a page in this
		 * range; if they did, we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->len < lockstart ||
		     ordered->file_offset > lockend)) &&
		    !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
				    lockend, EXTENT_UPTODATE, 0,
				    cached_state)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state, GFP_NOFS);
		btrfs_wait_ordered_range(inode, lockstart,
					 lockend - lockstart + 1);
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(root);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;

		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
		if (ret) {
			err = ret;
			break;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root, nr);

		trans = btrfs_start_transaction(root, 3);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
					      rsv, min_size);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
	if (ret) {
		err = ret;
		goto out_trans;
	}

out_trans:
	if (!trans)
		goto out_free;

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(root, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	mutex_unlock(&inode->i_mutex);
	if (ret && !err)
		err = ret;
	return err;
}
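
/*
 * Illustrative userspace trigger (editor's addition, not part of this file)
 * for the path above; the VFS requires PUNCH_HOLE to be paired with
 * KEEP_SIZE:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 */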
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, len);
	if (ret)
		return ret;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = (last_byte + mask) & ~mask;

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, len);
	return ret;
}
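
/*
 * Illustrative userspace sketch (editor's addition, not part of this file):
 * preallocating without changing i_size, then growing the file:
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);	-> size unchanged
 *	fallocate(fd, 0, 0, 1 << 20);			-> i_size becomes 1M
 */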
static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent
	 * just before the position we want in case there is outstanding
	 * delalloc going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
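
/*
 * Editor's gloss on the SEEK_HOLE look-back above: with dirty delalloc
 * pages over part of an on-disk hole, the extent lookup reports a hole for
 * the whole range, so the extent just before the target position is probed
 * first and last_end remembers where that data (capped at i_size for
 * delalloc) really ends, letting the VACANCY check distinguish "no data
 * before here" (-ENXIO) from a genuine hole.
 */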
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
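
/*
 * Illustrative userspace sketch (editor's addition, not part of this file)
 * of the SEEK_DATA/SEEK_HOLE support above, e.g. for a sparse-file copier:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  -> first data at/after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  -> end of that data run
 *
 * Both return -1 with errno == ENXIO when the offset is at or past i_size.
 */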
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};