/* file.c */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				     struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		if (defrag->ino < entry->ino)
			p = &parent->rb_left;
		else if (defrag->ino > entry->ino)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	BTRFS_I(inode)->in_defrag = 1;
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	kfree(defrag);
	return;
}
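
/*
 * Worked example of the merge rules above (illustrative values): if the
 * tree already holds a record for ino 257 with transid 120 and
 * last_offset 0, re-adding ino 257 with transid 100 and last_offset 8192
 * does not insert a second node; the existing record is updated to
 * transid 100 (the older of the two) and last_offset 8192 (the further
 * of the two), and the caller's struct is freed via the exists: path.
 */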

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	if (BTRFS_I(inode)->in_defrag)
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!BTRFS_I(inode)->in_defrag)
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kfree(defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct rb_node *p;
	struct rb_node *parent = NULL;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		if (ino < entry->ino)
			p = parent->rb_left;
		else if (ino > entry->ino)
			p = parent->rb_right;
		else
			return entry;
	}

	if (next) {
		while (parent && ino > entry->ino) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while (1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
		if (!defrag) {
			if (n)
				defrag = rb_entry(n, struct inode_defrag, rb_node);
			else if (first_ino) {
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		BTRFS_I(inode)->in_defrag = 0;
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
					       defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		kfree(defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);
	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
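
/*
 * Worked example of the offset math above (illustrative, assuming a 4K
 * PAGE_CACHE_SIZE): for pos = 6000, offset = 6000 & 4095 = 1904, so the
 * first iteration copies at most 4096 - 1904 = 2192 bytes into page 0;
 * a full copy then advances to page 1 with offset 0, while a short copy
 * stays on the same page at offset + copied.
 */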

/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		     root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
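
/*
 * Worked example of the alignment math above (illustrative, assuming a
 * 4096-byte sectorsize): for pos = 5000 and write_bytes = 3000,
 * start_pos = 5000 & ~4095 = 4096 and num_bytes = (3000 + 5000 - 4096 +
 * 4095) & ~4095 = 4096, so delalloc is set on the single sector-aligned
 * block [4096, 8191] that covers the whole write.
 */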

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		BUG_ON(!split || !split2); /* -ENOMEM */

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

out:
	btrfs_free_path(path);
	return ret;
}
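
/*
 * Worked example of the four overlap cases above (illustrative offsets):
 * with an on-disk extent covering [0, 8192) (key.offset = 0, extent_end
 * = 8192), dropping [4096, 12288) hits the third case (start >
 * key.offset && end >= extent_end), so the item is truncated to
 * [0, 4096); dropping [0, 12288) instead hits the fourth case and the
 * whole item is queued for deletion via del_slot/del_nr.
 */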

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
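
/*
 * Worked example of the split behavior above (illustrative offsets):
 * marking [4096, 8192) written inside a pre-allocated extent covering
 * [0, 12288) takes two passes of the split loop, leaving prealloc
 * [0, 4096), a regular extent for [4096, 8192), and prealloc
 * [8192, 12288), i.e. the "two or three" pieces the header comment
 * describes.
 */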

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
				  struct page **pages, size_t num_pages,
				  loff_t pos, unsigned long first_index,
				  size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				 GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}

static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
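
/*
 * Worked example of the nrptrs sizing above (illustrative, assuming 4K
 * pages and 8-byte pointers): a 1 MiB iov_iter needs 256 page pointers,
 * which is under the PAGE_CACHE_SIZE / sizeof(struct page *) = 512 cap,
 * so nrptrs starts at 256 and each loop iteration can batch up to 1 MiB,
 * subject to the further clamp against current->nr_dirtied_pause.
 */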

static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	/*
	 * the generic O_DIRECT will update in-memory i_size after the
	 * DIOs are done.  But our endio handlers that update the on
	 * disk i_size never update past the in memory i_size.  So we
	 * need one more update here to catch any additions to the
	 * file
	 */
	if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
		mark_inode_dirty(inode);
	}

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	err = btrfs_update_time(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}

static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, len);
	if (ret)
		return ret;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}
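
	/*
	 * Walk the extent maps covering [alloc_start, alloc_end).  Holes,
	 * and anything past i_size that isn't already preallocated, get a
	 * new preallocated extent; existing extents are skipped over.
	 */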
	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = (last_byte + mask) & ~mask;

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, len);
	return ret;
}
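
/*
 * Helper for btrfs_file_llseek(): starting at *offset, walk the extent
 * maps and move *offset to the first hole (SEEK_HOLE) or the first
 * extent with data (SEEK_DATA).  Returns -ENXIO if no such position
 * exists before EOF.
 */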
static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent
	 * just before the position we want in case there is outstanding
	 * delalloc going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}
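
	/*
	 * Scan forward one extent map at a time.  An EXTENT_FLAG_VACANCY
	 * mapping means no further extents exist past this point, so
	 * hitting one is a dead end for the search.
	 */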
	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}

		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
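
/*
 * SEEK_END and SEEK_CUR are handed to generic_file_llseek() under
 * i_mutex; SEEK_DATA and SEEK_HOLE are resolved via
 * find_desired_extent().  A plain SEEK_SET falls through to the offset
 * validation below.
 */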
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
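
/*
 * File operations for regular btrfs files.  Reads go through the
 * generic paths; writes, fsync, mmap, fallocate, llseek and the ioctls
 * use the btrfs-specific handlers.
 */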
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};