file.c

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"

static struct kmem_cache *btrfs_inode_defrag_cachep;

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

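/*
 * compare two defrag records, ordering first by root objectid and then
 * by inode number, so all records for one subvolume sort together
 */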
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

/* insert a record for an inode into the defrag tree.  The lock
 * must be held already.
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered.
 *
 * If an existing record is found, the defrag item you
 * pass in is freed.
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return 0;
}

static inline int __need_auto_defrag(struct btrfs_root *root)
{
	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	return 1;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(root))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, the new in-memory
		 * inode doesn't have IN_DEFRAG set.  In that case we may
		 * find an existing defrag record for it in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

/*
 * Requeue the defrag object.  If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (!__need_auto_defrag(root))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to
	 * merge them together.
	 */
	spin_lock(&root->fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}

/*
 * pick the defraggable inode that we want; if it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}

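/*
 * free every record queued for auto defrag; called on filesystem
 * teardown.  The lock is dropped periodically so the walk doesn't
 * hog the CPU with the spinlock held.
 */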
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		if (need_resched()) {
			spin_unlock(&fs_info->defrag_inodes_lock);
			cond_resched();
			spin_lock(&fs_info->defrag_inodes_lock);
		}

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}

#define BTRFS_DEFRAG_BATCH	1024

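/*
 * defrag a single queued inode: look up its root and inode, then run
 * btrfs_defrag_file() for up to BTRFS_DEFRAG_BATCH pages.  If the
 * batch was filled, or we started partway through the file, the record
 * is requeued; otherwise it is freed.
 */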
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	key.objectid = defrag->ino;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again.
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}

/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info->tree_root))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty.  Clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		remove_extent_mapping(em_tree, em);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split, modified);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
						+ diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			ret = add_extent_mapping(em_tree, split, modified);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = NULL;
		}
next:
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf,
						path->slots[0], fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to the first slot, so that after the
		 * delete, if items are moved off from our leaf to its
		 * immediate left or right neighbor leafs, we end up with a
		 * correct and adjusted path->slots[0] for our insertion.
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);

		leaf = path->nodes[0];
		/*
		 * The leaf eb has flag EXTENT_BUFFER_STALE if it was deleted
		 * (that is, its contents got pushed to its neighbors), in
		 * which case it means path->locks[0] == 0
		 */
		if (!ret && replace_extent && leafs_visited == 1 &&
		    path->locks[0] &&
		    btrfs_leaf_free_space(root, leaf) >=
		    sizeof(struct btrfs_item) + extent_item_size) {
			key.objectid = ino;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = start;
			setup_items_for_insert(root, path, &key,
					       &extent_item_size,
					       extent_item_size,
					       sizeof(struct btrfs_item) +
					       extent_item_size, 1);
			*key_inserted = 1;
		}
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	return ret;
}

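/*
 * convenience wrapper around __btrfs_drop_extents() that allocates and
 * frees the path itself and doesn't ask for the end of the dropped
 * range or a replacement extent item
 */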
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}

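/*
 * check whether the extent item in @slot is a plain (uncompressed,
 * unencrypted, unencoded) regular extent backed by @bytenr whose bounds
 * agree with any caller-supplied *start/*end; on success report its
 * bounds so the caller can merge it with a neighboring extent
 */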
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split
 * into two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}

/*
 * This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if need be.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - need to re-prepare the pages
 * any other negative value - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;

	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos, 0, cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, last_pos);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset <= last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos,
					     cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			ret = btrfs_wait_ordered_range(inode, start_pos,
						last_pos - start_pos + 1);
			if (ret)
				return ret;
			else
				return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				 0, 0, cached_state, GFP_NOFS);
		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}

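/*
 * decide whether a write at @pos can avoid COW and a data space
 * reservation: wait out any ordered extents over the range, then ask
 * can_nocow_extent() how much may be written in place.  Returns > 0
 * (and trims *write_bytes) when nocow is possible, 0 when it isn't.
 */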
static noinline int check_can_nocow(struct inode *inode, loff_t pos,
				    size_t *write_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_extent *ordered;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	lockstart = round_down(pos, root->sectorsize);
	lockend = lockstart + round_up(*write_bytes, root->sectorsize) - 1;

	while (1) {
		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered) {
			break;
		}
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	num_bytes = lockend - lockstart + 1;
	ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
	if (ret <= 0) {
		ret = 0;
	} else {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
				 NULL, GFP_NOFS);
		*write_bytes = min_t(size_t, *write_bytes, num_bytes);
	}

	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);

	return ret;
}

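/*
 * copy user data into the page cache in batches of up to nrptrs pages:
 * reserve data and metadata space (falling back to a nocow check on
 * ENOSPC), prepare and lock the pages, copy from the iov_iter, then
 * mark the copied range dirty and delalloc via btrfs_dirty_pages()
 */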
  1281. static noinline ssize_t __btrfs_buffered_write(struct file *file,
  1282. struct iov_iter *i,
  1283. loff_t pos)
  1284. {
  1285. struct inode *inode = file_inode(file);
  1286. struct btrfs_root *root = BTRFS_I(inode)->root;
  1287. struct page **pages = NULL;
  1288. struct extent_state *cached_state = NULL;
  1289. u64 release_bytes = 0;
  1290. u64 lockstart;
  1291. u64 lockend;
  1292. unsigned long first_index;
  1293. size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	bool need_unlock;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
		ret = btrfs_check_data_free_space(inode, reserve_bytes);
		if (ret == -ENOSPC &&
		    (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
					      BTRFS_INODE_PREALLOC))) {
			ret = check_can_nocow(inode, pos, &write_bytes);
			if (ret > 0) {
				only_release_metadata = true;
				/*
				 * Our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = (write_bytes + offset +
					     PAGE_CACHE_SIZE - 1) >>
					     PAGE_CACHE_SHIFT;
				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
				ret = 0;
			} else {
				ret = -ENOSPC;
			}
		}

		if (ret)
			break;

		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(inode,
							       reserve_bytes);
			break;
		}

		release_bytes = reserve_bytes;
		need_unlock = false;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop.
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret)
			break;

		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
						      pos, &lockstart, &lockend,
						      &cached_state);
		if (ret < 0) {
			if (ret == -EAGAIN)
				goto again;
			break;
		} else if (ret > 0) {
			need_unlock = true;
			ret = 0;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * If we have trouble faulting in the pages, fall
		 * back to one page at a time.
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess
		 * delalloc bytes we reserved.  We need to increment
		 * outstanding_extents because btrfs_delalloc_release_space
		 * will decrement it, but we still have an outstanding extent
		 * for the chunk we actually managed to copy.
		 */
		if (num_pages > dirty_pages) {
			release_bytes = (num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT;
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			if (only_release_metadata)
				btrfs_delalloc_release_metadata(inode,
								release_bytes);
			else
				btrfs_delalloc_release_space(inode,
							     release_bytes);
		}

		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
		if (copied > 0)
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
		if (need_unlock)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state,
					     GFP_NOFS);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata && copied > 0) {
			u64 lockstart = round_down(pos, root->sectorsize);
			u64 lockend = lockstart +
				(dirty_pages << PAGE_CACHE_SHIFT) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
			only_release_metadata = false;
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, release_bytes);
		else
			btrfs_delalloc_release_space(inode, release_bytes);
	}

	return num_written ? num_written : ret;
}
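
/*
 * O_DIRECT write path: try generic_file_direct_write() first, and if it
 * completes only partially, fall back to __btrfs_buffered_write() for the
 * remainder, then flush and invalidate that range of the page cache so it
 * stays coherent with the directly written data.
 */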
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
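
/*
 * Bump mtime/ctime to the current fs time ahead of a write, and bump
 * i_version where the filesystem uses it.  Inodes marked NOCMTIME are
 * left untouched.
 */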
static void update_time_for_write(struct inode *inode)
{
	struct timespec now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
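
/*
 * Top-level write entry point: takes i_mutex, runs the generic write
 * checks, routes the write to the O_DIRECT or buffered path, and finally
 * handles generic_write_sync() for O_DSYNC/IS_SYNC files.
 */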
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for
	 * the extent we are going to write, so any ENOSPC is returned there.
	 * We don't need to start yet another transaction to update the inode
	 * as we will update the inode when we finish writing whatever data
	 * we write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * We want to make sure fsync finds this change, but we haven't
	 * joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1; this will
	 * either be one more than the running transaction, or the
	 * generation used for the next transaction if there isn't one
	 * running right now.
	 *
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	if (num_written > 0) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
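
/*
 * ->release hook, called when the last reference to an open file is
 * dropped.
 */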
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		struct btrfs_trans_handle *trans;
		struct btrfs_root *root = BTRFS_I(inode)->root;

		/*
		 * We need to block on a committing transaction to keep us from
		 * throwing an ordered operation on to the list and causing
		 * something like sync to deadlock trying to flush out this
		 * inode.
		 */
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
		btrfs_end_transaction(trans, root);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;
	bool full_sync = 0;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
	 * pages concurrently, which improves performance.  See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * We flush the dirty pages again to avoid some dirty pages in the
	 * range being left behind.
	 */
	atomic_inc(&root->log_batch);
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);
	if (full_sync) {
		ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}
	atomic_inc(&root->log_batch);

	/*
	 * Check the transaction that last modified this inode
	 * and see if it has already been committed.
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If the last transaction that changed this file was before the
	 * current transaction, we can bail out now without any syncing.
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;

		/*
		 * We've had everything committed since the last time we were
		 * modified, so clear this flag in case it was set for whatever
		 * reason; it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * OK, we haven't committed the transaction yet, let's do a commit.
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	/*
	 * We use start here because we will need to wait on the IO to complete
	 * in btrfs_sync_log, which could require joining a transaction (for
	 * example checking cross references in the nocow path).  If we use
	 * join here we could get into a situation where we're waiting on IO
	 * to happen that is blocked on a transaction trying to commit.  With
	 * start we inc the extwriter counter, so we wait for all extwriters
	 * to exit before we start blocking joiners.  This comment is to keep
	 * somebody from thinking they are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	trans->sync = true;

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		/* Fall through and commit/free the transaction. */
		ret = 1;
	}

	/*
	 * We've logged all the items and now have a consistent version of the
	 * file in the log.  It is possible that someone will come in and
	 * modify the file, but that's fine because the log is consistent on
	 * disk, and we have references to all of the file's extents.
	 *
	 * It is possible that someone will come in and log the file again,
	 * but that will end up using the synchronization inside
	 * btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root);
			if (!ret) {
				ret = btrfs_end_transaction(trans, root);
				goto out;
			}
		}
		if (!full_sync) {
			ret = btrfs_wait_ordered_range(inode, start,
						       end - start + 1);
			if (ret)
				goto out;
		}
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};
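
/*
 * ->mmap hook: refuse mappings on address spaces without a ->readpage,
 * mark the file accessed, and install btrfs_file_vm_ops so write faults
 * go through btrfs_page_mkwrite().
 */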
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}
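
/*
 * Check whether the file extent item at @slot is a hole (a REG extent with
 * a zero disk_bytenr) that directly borders [start, end), i.e. whether a
 * new hole covering that range could simply be merged into it.
 */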
static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;

	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;

	return 0;
}
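
/*
 * Record a hole covering [offset, end) after the extents there have been
 * dropped: extend an adjacent hole item when possible, otherwise insert a
 * new file extent item with no disk backing.  The in-memory extent map is
 * updated to match, or the inode is flagged for a full sync if that fails.
 * Filesystems with the NO_HOLES incompat bit skip the item entirely.
 */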
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
		goto out;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0] + 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]++;
		key.offset = offset;
		btrfs_set_item_key_safe(root, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, end - offset, 0, end - offset,
				       0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	}

	return 0;
}
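
/*
 * Punch a hole over [offset, offset + len): zero any partial pages at the
 * edges, drop the fully covered extents a chunk at a time (restarting the
 * transaction as the block reservation runs dry), and fill the range with
 * hole extents via fill_holes().
 */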
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
	u64 lockend = round_down(offset + len,
				 BTRFS_I(inode)->root->sectorsize) - 1;
	u64 cur_offset = lockstart;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	u64 drop_end;
	int ret = 0;
	int err = 0;
	int rsv_count;
	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));
	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);

	ret = btrfs_wait_ordered_range(inode, offset, len);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	/*
	 * We needn't truncate any page which is beyond the end of the file
	 * because we are sure there is no data there.
	 */
	/*
	 * Only do this if we are in the same page and we aren't doing the
	 * entire page.
	 */
	if (same_page && len < PAGE_CACHE_SIZE) {
		if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
			ret = btrfs_truncate_page(inode, offset, len, 0);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero back part of the first page */
	if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset, 0, 0);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	/* zero the front end of the last page */
	if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset + len, 0, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (lockend < lockstart) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this
		 * range and that nobody raced in and read a page in this
		 * range; if they did, we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->len <= lockstart ||
		     ordered->file_offset > lockend)) &&
		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, EXTENT_UPTODATE, 0,
				     cached_state)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state, GFP_NOFS);
		ret = btrfs_wait_ordered_range(inode, lockstart,
					       lockend - lockstart + 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent if no_holes isn't set
	 */
	rsv_count = no_holes ? 2 : 3;
	trans = btrfs_start_transaction(root, rsv_count);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1, 0, 0, NULL);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;

		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
		if (ret) {
			err = ret;
			break;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root);

		trans = btrfs_start_transaction(root, rsv_count);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
					      rsv, min_size);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
	if (ret) {
		err = ret;
		goto out_trans;
	}

out_trans:
	if (!trans)
		goto out_free;

	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(root, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	mutex_unlock(&inode->i_mutex);
	if (ret && !err)
		err = ret;
	return err;
}
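
/*
 * fallocate() handler.  Only FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE
 * are supported.  For plain preallocation, data (and qgroup) space is
 * reserved up front, ordered IO is waited out, and then the range is
 * walked, preallocating extents for any holes found.
 */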
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	struct extent_map *em;
	int blocksize = BTRFS_I(inode)->root->sectorsize;
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);

	/* Make sure we aren't being given some crap mode. */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		return ret;
	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
		if (ret)
			goto out_reserve_fail;
	}

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	} else {
		/*
		 * If we are fallocating from the end of the file onward we
		 * need to zero out the end of the page if i_size lands in the
		 * middle of a page.
		 */
		ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
		if (ret)
			goto out;
	}

	/*
	 * Wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	ret = btrfs_wait_ordered_range(inode, alloc_start,
				       alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/*
		 * The extent lock is ordered inside the running
		 * transaction.
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * We can't wait on the range with the transaction
			 * running or with the extent lock held.
			 */
			ret = btrfs_wait_ordered_range(inode, alloc_start,
						       alloc_end - alloc_start);
			if (ret)
				goto out;
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	if (root->fs_info->quota_enabled)
		btrfs_qgroup_free(root, alloc_end - alloc_start);
out_reserve_fail:
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
	return ret;
}
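
/*
 * Helper for SEEK_DATA/SEEK_HOLE: walk the extent maps from *offset toward
 * i_size until the first data or hole extent is found (prealloc counts as
 * a hole) and report its start back through *offset.
 */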
static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 len = i_size_read(inode);
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	lockend--;
	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	while (start < inode->i_size) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}

		if (whence == SEEK_HOLE &&
		    (em->block_start == EXTENT_MAP_HOLE ||
		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;
		else if (whence == SEEK_DATA &&
			 (em->block_start != EXTENT_MAP_HOLE &&
			  !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;

		start = em->start + em->len;
		free_extent_map(em);
		em = NULL;
		cond_resched();
	}
	free_extent_map(em);
	if (!ret) {
		if (whence == SEEK_DATA && start >= inode->i_size)
			ret = -ENXIO;
		else
			*offset = min_t(loff_t, start, inode->i_size);
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
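
/*
 * llseek with SEEK_DATA/SEEK_HOLE support; SEEK_CUR and SEEK_END go
 * through generic_file_llseek(), and anything else falls through to
 * vfs_setpos().
 */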
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, whence);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, whence);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};
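
/* Module init/exit helpers for the inode_defrag kmem cache. */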
void btrfs_auto_defrag_exit(void)
{
	if (btrfs_inode_defrag_cachep)
		kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}