delayed-inode.c

/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
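
/*
 * Look up the delayed node cached in the btrfs inode, or find it in the
 * root's radix tree and cache it there.  Returns the node with an extra
 * reference held, or NULL if the inode has no delayed node.
 */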
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Call this with delayed_node->mutex held.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call this with delayed_node->mutex held. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
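
/*
 * Drop one reference on a delayed node.  If the node still has pending
 * items it is (re)queued on the work lists, otherwise it is dequeued;
 * dropping the last reference also removes the node from the radix tree
 * and frees it.
 */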
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
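
/*
 * Allocate a delayed item with @data_len bytes of payload placed right
 * after the struct.  The caller fills in the key and the data.
 */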
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;

	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rbtree (insertion or deletion tree) of the delayed node
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}
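
/*
 * Insert a delayed item into the insertion or deletion rbtree of the
 * delayed node, depending on @action.  Returns -EEXIST if an item with
 * the same key is already queued.
 */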
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
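
/*
 * Account one finished delayed item and wake up any waiters once the
 * backlog drops below the background threshold or a batch completes.
 */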
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
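
/*
 * Migrate the metadata reservation for one delayed item from the
 * transaction's block reserve into the delayed-inode block reserve.
 */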
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode.  If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind.  We
	 * are here through delalloc, which means we've likely just cowed down
	 * close to the leaf that contains the inode, so we would steal less
	 * just doing the fallback inode update.  So if we do end up having to
	 * steal from the global block rsv, we hopefully only steal one or two
	 * blocks' worth, which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&BTRFS_I(inode)->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have
	 * things migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced the size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1
	 * reservation, yet we could in fact do this reserve/migrate dance
	 * several times between the time we did the original reservation and
	 * the time we'd clean it up.  So to take care of this, release the
	 * space for the meta reservation here.  I think it may be time for a
	 * documentation page on how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task to
	 * sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
/*
 * This helper handles the simple insertions that don't need to extend the
 * item for new data, such as directory name index insertion and inode
 * insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
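
/*
 * Delete a run of contiguous dir index items from the leaf that @path
 * points to, matching the leaf items against the queued delayed
 * deletion items.
 */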
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
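
/*
 * Clear the INODE_DIRTY flag and drop the item accounting for a delayed
 * node once its inode item has been written back or the update has been
 * abandoned.
 */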
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
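
/*
 * Copy the in-memory inode item of the delayed node over the inode item
 * in the tree, and delete the single inode ref if its removal was also
 * delayed.  Called with delayed_node->mutex held.
 */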
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link,
	 * so there is only one iref.  The case of several irefs in the
	 * same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
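
/*
 * Worker callback: flush prepared delayed nodes in the background until
 * the requested number is done or the backlog is low enough.
 */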
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
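
/*
 * Throttle callers when too many delayed items have accumulated: kick
 * the async worker, and above the writeback threshold also wait until
 * the backlog shrinks.
 */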
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we reserved enough space when we started a new transaction,
	 * so a metadata reservation failure is impossible here
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we reserved enough space when we started a new transaction,
	 * so a metadata reservation failure is impossible here.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we hold the i_mutex of this directory, no new directory
	 * index can be added to the delayed node and index_cnt cannot be
	 * updated right now.  So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
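
/*
 * Snapshot the pending insertion/deletion dir index items so that
 * readdir can merge them with what is on disk.  Takes a reference on
 * each item.
 */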
bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);

	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read dir, and we do not
	 * insert/delete delayed items in this period.  So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);

	return true;
}

void btrfs_readdir_put_delayed_items(struct inode *inode,
                                     struct list_head *ins_list,
                                     struct list_head *del_list)
{
        struct btrfs_delayed_item *curr, *next;

        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                list_del(&curr->readdir_list);
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }

        /*
         * The VFS is going to do up_read(), so we need to downgrade back to a
         * read lock.
         */
        downgrade_write(&inode->i_rwsem);
}
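
/*
 * Note on locking (a summary of the existing behavior, not new logic):
 * ->iterate_shared() is entered with i_rwsem held shared.
 * btrfs_readdir_get_delayed_items() trades that shared hold for an
 * exclusive one (inode_unlock_shared() + inode_lock()), and the
 * downgrade_write() above restores the shared state the VFS expects to
 * release.  The two helpers must therefore always be paired whenever the
 * first one returned true.
 */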

int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index)
{
        struct btrfs_delayed_item *curr, *next;
        int ret;

        if (list_empty(del_list))
                return 0;

        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                if (curr->key.offset > index)
                        break;

                list_del(&curr->readdir_list);
                ret = (curr->key.offset == index);

                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);

                if (ret)
                        return 1;
        }
        return 0;
}
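
/*
 * A small worked example of the helper above (hypothetical offsets, for
 * illustration only): with del_list holding deletion items at offsets
 * {3, 7}, a query for index 5 drops the stale entry at 3 and returns 0,
 * a following query for index 7 returns 1, and a query for index 9 on
 * the now-empty list returns 0 immediately.  The list is sorted by
 * offset, which is why entries below the queried index can be discarded
 * as we pass them.
 */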

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                                    struct list_head *ins_list)
{
        struct btrfs_dir_item *di;
        struct btrfs_delayed_item *curr, *next;
        struct btrfs_key location;
        char *name;
        int name_len;
        int over = 0;
        unsigned char d_type;

        if (list_empty(ins_list))
                return 0;

        /*
         * The data of a delayed item never changes after it is created,
         * so we need not lock the items.  And since we hold the i_mutex
         * of the directory, nobody can delete any directory index now.
         */
        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);

                if (curr->key.offset < ctx->pos) {
                        if (atomic_dec_and_test(&curr->refs))
                                kfree(curr);
                        continue;
                }

                ctx->pos = curr->key.offset;

                di = (struct btrfs_dir_item *)curr->data;
                name = (char *)(di + 1);
                name_len = btrfs_stack_dir_name_len(di);

                d_type = btrfs_filetype_table[di->type];
                btrfs_disk_key_to_cpu(&location, &di->location);

                over = !dir_emit(ctx, name, name_len,
                                 location.objectid, d_type);

                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);

                if (over)
                        return 1;
        }
        return 0;
}

static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
                                  struct btrfs_inode_item *inode_item,
                                  struct inode *inode)
{
        btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
        btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
        btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
        btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
        btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
        btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
        btrfs_set_stack_inode_generation(inode_item,
                                         BTRFS_I(inode)->generation);
        btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
        btrfs_set_stack_inode_transid(inode_item, trans->transid);
        btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
        btrfs_set_stack_inode_block_group(inode_item, 0);

        btrfs_set_stack_timespec_sec(&inode_item->atime,
                                     inode->i_atime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->atime,
                                      inode->i_atime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->mtime,
                                     inode->i_mtime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->mtime,
                                      inode->i_mtime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->ctime,
                                     inode->i_ctime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->ctime,
                                      inode->i_ctime.tv_nsec);

        btrfs_set_stack_timespec_sec(&inode_item->otime,
                                     BTRFS_I(inode)->i_otime.tv_sec);
        btrfs_set_stack_timespec_nsec(&inode_item->otime,
                                      BTRFS_I(inode)->i_otime.tv_nsec);
}
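
/*
 * fill_stack_inode_item() above and btrfs_fill_inode() below are, field
 * for field, inverses of each other: the former captures the VFS inode
 * into the stack btrfs_inode_item held by the delayed node, and the
 * latter replays a dirty delayed inode item back into the VFS inode
 * (note that it also resets index_cnt to (u64)-1, marking it as unknown
 * so it is looked up again when next needed).
 */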

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_inode_item *inode_item;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return -ENOENT;

        mutex_lock(&delayed_node->mutex);
        if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                mutex_unlock(&delayed_node->mutex);
                btrfs_release_delayed_node(delayed_node);
                return -ENOENT;
        }

        inode_item = &delayed_node->inode_item;

        i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
        i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
        btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
        inode->i_mode = btrfs_stack_inode_mode(inode_item);
        set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
        inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
        BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
        BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

        inode->i_version = btrfs_stack_inode_sequence(inode_item);
        inode->i_rdev = 0;
        *rdev = btrfs_stack_inode_rdev(inode_item);
        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

        inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
        inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

        inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
        inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

        inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
        inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

        BTRFS_I(inode)->i_otime.tv_sec =
                btrfs_stack_timespec_sec(&inode_item->otime);
        BTRFS_I(inode)->i_otime.tv_nsec =
                btrfs_stack_timespec_nsec(&inode_item->otime);

        inode->i_generation = BTRFS_I(inode)->generation;
        BTRFS_I(inode)->index_cnt = (u64)-1;

        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return 0;
}
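
/*
 * A caller-side sketch (illustrative; this mirrors how the inode read
 * path is expected to use btrfs_fill_inode(), with the on-disk lookup
 * elided):
 *
 *	ret = btrfs_fill_inode(inode, &rdev);
 *	if (ret)
 *		// no dirty delayed inode item is cached: fall back to
 *		// reading the inode item from the fs tree leaf
 *
 * The -ENOENT returns above are thus a normal, expected outcome, not an
 * error condition.
 */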

int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;
        int ret = 0;

        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
                goto release_node;
        }

        ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
                                                   delayed_node);
        if (ret)
                goto release_node;

        fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
        set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
        delayed_node->count++;
        atomic_inc(&root->fs_info->delayed_root->items);
release_node:
        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return ret;
}
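
/*
 * Design note: if the delayed node already carries a dirty inode item,
 * the update above simply overwrites the stack copy and reuses the
 * metadata reservation taken the first time, so repeated updates of the
 * same inode within one transaction cost a single reservation.  Callers
 * are expected to fall back to updating the inode item in the tree
 * directly when this returns an error (a sketch, not a guaranteed call
 * site):
 *
 *	ret = btrfs_delayed_update_inode(trans, root, inode);
 *	if (ret)
 *		ret = <update the inode item in the fs tree directly>;
 */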

int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_delayed_node *delayed_node;

        /*
         * We don't do delayed inode updates during log recovery because it
         * leads to ENOSPC problems.  This means we also can't do delayed
         * inode refs.
         */
        if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
                return -EAGAIN;

        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);

        /*
         * We don't reserve space for the inode ref deletion because:
         * - We ONLY do async inode ref deletion for an inode that has only
         *   one link (i_nlink == 1), which means there is only one inode ref.
         *   In most cases, the inode ref and the inode item are in the same
         *   leaf, and we will deal with them at the same time.  Since we are
         *   sure we will reserve the space for the inode item, it is
         *   unnecessary to reserve space for the inode ref deletion.
         * - If the inode ref and the inode item are not in the same leaf,
         *   we also need not worry about ENOSPC, because we reserve much
         *   more space for the inode update than it needs.
         * - In the worst case, we can steal some space from the global
         *   reservation, but that is very rare.
         */
        mutex_lock(&delayed_node->mutex);
        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
                goto release_node;

        set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
        delayed_node->count++;
        atomic_inc(&fs_info->delayed_root->items);
release_node:
        mutex_unlock(&delayed_node->mutex);
        btrfs_release_delayed_node(delayed_node);
        return 0;
}
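
/*
 * Caller-side sketch (illustrative only): the -EAGAIN return during log
 * recovery is meant to push the caller onto the synchronous path, e.g.
 * roughly:
 *
 *	ret = btrfs_delayed_delete_inode_ref(inode);
 *	if (ret)
 *		ret = <delete the inode ref from the fs tree directly>;
 */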

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
        struct btrfs_root *root = delayed_node->root;
        struct btrfs_delayed_item *curr_item, *prev_item;

        mutex_lock(&delayed_node->mutex);
        curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(root, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (curr_item) {
                btrfs_delayed_item_release_metadata(root, curr_item);
                prev_item = curr_item;
                curr_item = __btrfs_next_delayed_item(prev_item);
                btrfs_release_delayed_item(prev_item);
        }

        if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
                btrfs_release_delayed_iref(delayed_node);

        if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
                btrfs_delayed_inode_release_metadata(root, delayed_node);
                btrfs_release_delayed_inode(delayed_node);
        }
        mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
        struct btrfs_delayed_node *delayed_node;

        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
                return;

        __btrfs_kill_delayed_node(delayed_node);
        btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
        u64 inode_id = 0;
        struct btrfs_delayed_node *delayed_nodes[8];
        int i, n;
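        /*
         * Walk the per-root radix tree in batches of up to
         * ARRAY_SIZE(delayed_nodes) entries.  Each pass restarts the gang
         * lookup at inode_id, the id just past the last node seen, so the
         * scan makes forward progress even though the lock is dropped
         * between batches; refs are taken under inode_lock so the nodes
         * cannot be freed while we kill them.
         */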
        while (1) {
                spin_lock(&root->inode_lock);
                n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
                                           (void **)delayed_nodes, inode_id,
                                           ARRAY_SIZE(delayed_nodes));
                if (!n) {
                        spin_unlock(&root->inode_lock);
                        break;
                }

                inode_id = delayed_nodes[n - 1]->inode_id + 1;

                for (i = 0; i < n; i++)
                        atomic_inc(&delayed_nodes[i]->refs);
                spin_unlock(&root->inode_lock);

                for (i = 0; i < n; i++) {
                        __btrfs_kill_delayed_node(delayed_nodes[i]);
                        btrfs_release_delayed_node(delayed_nodes[i]);
                }
        }
}

void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
        struct btrfs_delayed_node *curr_node, *prev_node;

        curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
        while (curr_node) {
                __btrfs_kill_delayed_node(curr_node);

                prev_node = curr_node;
                curr_node = btrfs_next_delayed_node(curr_node);
                btrfs_release_delayed_node(prev_node);
        }
}