qgroup.c

/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * one struct for each qgroup, organized in fs_info->qgroup_tree.
 */
struct btrfs_qgroup {
	u64 qgroupid;

	/*
	 * state
	 */
	u64 rfer;	/* referenced */
	u64 rfer_cmpr;	/* referenced compressed */
	u64 excl;	/* exclusive */
	u64 excl_cmpr;	/* exclusive compressed */

	/*
	 * limits
	 */
	u64 lim_flags;	/* which limits are set */
	u64 max_rfer;
	u64 max_excl;
	u64 rsv_rfer;
	u64 rsv_excl;

	/*
	 * reservation tracking
	 */
	u64 reserved;

	/*
	 * lists
	 */
	struct list_head groups;  /* groups this group is member of */
	struct list_head members; /* groups that are members of this group */
	struct list_head dirty;   /* dirty groups */
	struct rb_node node;	  /* tree of qgroups */

	/*
	 * temp variables for accounting operations
	 * Refer to qgroup_shared_accounting() for details.
	 */
	u64 old_refcnt;
	u64 new_refcnt;
};

static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}
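
/*
 * An illustrative example of the seq trick used by the helpers above:
 * the per-accounting-pass sequence number is folded into the refcounts
 * so they never need to be reset between extents.  With seq == 100,
 * btrfs_qgroup_update_old_refcnt(qg, 100, 1) first raises a stale
 * old_refcnt up to 100 and then adds 1, so
 * btrfs_qgroup_get_old_refcnt(qg, 100) returns 1.  A qgroup that was
 * not touched during this pass still has old_refcnt < 100 and therefore
 * reads back as 0.
 */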

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};
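
/*
 * For illustration: making qgroup 0/5 a member of 1/100 allocates one
 * btrfs_qgroup_list with ->member = 0/5 and ->group = 1/100, then links
 * ->next_group into the member's "groups" list and ->next_member into
 * the parent's "members" list (see add_relation_rb() below), so the
 * relation can be walked from either side.
 */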

#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!fs_info->quota_enabled)
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, "
					"marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
		fs_info->quota_enabled = 0;
		fs_info->pending_quota_state = 0;
	} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		   ret >= 0) {
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	}
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * first two are in single-threaded paths. And for the third one, we have set
 * quota_root to be null with qgroup_lock held before, so it is safe to clean
 * up the in-memory structures without qgroup_lock held.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * we call btrfs_free_qgroup_config() when umounting
	 * filesystem and disabling quota, so we set qgroup_ulist
	 * to be null here to avoid double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}

static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_test_is_dummy_root(quota_root))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_test_is_dummy_root(root))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * delete the leaf one by one
		 * since the whole tree is going
		 * to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	root->fs_info->pending_quota_state = 0;
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root) {
		fs_info->pending_quota_state = 1;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret)
		goto out_free_path;

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0)
		goto out_free_path;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret)
				goto out_free_path;

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0)
			goto out_free_path;
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret)
		goto out_free_path;

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	fs_info->pending_quota_state = 1;
	spin_unlock(&fs_info->qgroup_lock);
out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_quota_disable(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *quota_root;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	fs_info->quota_enabled = 0;
	fs_info->pending_quota_state = 0;
	btrfs_qgroup_wait_for_completion(fs_info);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret)
		goto out;

	ret = btrfs_del_root(trans, tree_root, &quota_root->root_key);
	if (ret)
		goto out;

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting, if we are adding/removing the only ref for an extent
 * then this qgroup and all of the parent qgroups get their reference and
 * exclusive counts adjusted.
 *
 * Caller should hold fs_info->qgroup_lock.
 */
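/*
 * For illustration: if qgroup 0/257 is the only holder of a 16K extent
 * and is a member of 1/100, adding that extent with sign == 1 bumps
 * rfer/rfer_cmpr and excl/excl_cmpr of 0/257 by 16K, then walks the
 * ulist of ancestors and bumps 1/100 (and any of its parents) by the
 * same amount; sign == -1 reverses this when the last ref goes away.
 */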
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    u64 num_bytes, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;
	if (sign > 0)
		qgroup->reserved -= num_bytes;

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				ptr_to_u64(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = u64_to_ptr(unode->aux);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup->reserved -= num_bytes;
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					ptr_to_u64(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Quick path for updating qgroup with only excl refs.
 *
 * In that case, just updating all parents will be enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for quick update, return >0 if a full rescan is needed
 * and the INCONSISTENT flag will be set.
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup->excl, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, quota_root, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, quota_root, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, quota_root, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(quota_root->fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

int __del_qgroup_relation(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	int ret = 0;
	int err;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;

	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists already */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent)
			goto exist;
	}
	ret = -ENOENT;
	goto out;
exist:
	ret = del_qgroup_relation_item(trans, quota_root, src, dst);
	err = del_qgroup_relation_item(trans, quota_root, dst, src);
	if (err && !ret)
		ret = err;

	spin_lock(&fs_info->qgroup_lock);
	del_relation_rb(fs_info, src, dst);
	ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	ulist_free(tmp);
	return ret;
}

int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info, u64 src, u64 dst)
{
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	ret = __del_qgroup_relation(trans, fs_info, src, dst);
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	return ret;
}

int btrfs_create_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}
	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (qgroup) {
		ret = -EEXIST;
		goto out;
	}

	ret = add_qgroup_item(trans, quota_root, qgroupid);
	if (ret)
		goto out;

	spin_lock(&fs_info->qgroup_lock);
	qgroup = add_qgroup_rb(fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);

	if (IS_ERR(qgroup))
		ret = PTR_ERR(qgroup);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *list;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	} else {
		/* check if there are no children of this qgroup */
		if (!list_empty(&qgroup->members)) {
			ret = -EBUSY;
			goto out;
		}
	}
	ret = del_qgroup_item(trans, quota_root, qgroupid);

	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		ret = __del_qgroup_relation(trans, fs_info,
					    qgroupid,
					    list->group->qgroupid);
		if (ret)
			goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, u64 qgroupid,
		       struct btrfs_qgroup_limit *limit)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	int ret = 0;
	/* Sometimes we would want to clear the limit on this qgroup.
	 * To meet this requirement, we treat the -1 as a special value
	 * which tells the kernel to clear the limit on this qgroup.
	 */
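	/*
	 * For example, passing limit->max_rfer == (u64)-1 with
	 * BTRFS_QGROUP_LIMIT_MAX_RFER set in limit->flags clears both the
	 * in-memory max_rfer and the corresponding bit in lim_flags below.
	 */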
	const u64 CLEAR_VALUE = -1;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup) {
		ret = -ENOENT;
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
		if (limit->max_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
			qgroup->max_rfer = 0;
		} else {
			qgroup->max_rfer = limit->max_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
		if (limit->max_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
			qgroup->max_excl = 0;
		} else {
			qgroup->max_excl = limit->max_excl;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
		if (limit->rsv_rfer == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
			qgroup->rsv_rfer = 0;
		} else {
			qgroup->rsv_rfer = limit->rsv_rfer;
		}
	}
	if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
		if (limit->rsv_excl == CLEAR_VALUE) {
			qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
			qgroup->rsv_excl = 0;
		} else {
			qgroup->rsv_excl = limit->rsv_excl;
		}
	}
	qgroup->lim_flags |= limit->flags;

	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_limit_item(trans, quota_root, qgroup);
	if (ret) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		btrfs_info(fs_info, "unable to update quota limit for %llu",
			   qgroupid);
	}

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;

	/*
	 * No need to do lock, since this function will only be called in
	 * btrfs_commit_transaction().
	 */
	node = rb_first(&delayed_refs->dirty_extent_root);
	while (node) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);
		ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0,
					   &record->old_roots);
		if (ret < 0)
			break;
		if (qgroup_to_skip)
			ulist_del(record->old_roots, qgroup_to_skip, 0);
		node = rb_next(node);
	}
	return ret;
}

struct btrfs_qgroup_extent_record
*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
				  struct btrfs_qgroup_extent_record *record)
{
	struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_qgroup_extent_record *entry;
	u64 bytenr = record->bytenr;

	assert_spin_locked(&delayed_refs->lock);
	trace_btrfs_qgroup_insert_dirty_extent(record);

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
				 node);
		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&record->node, parent_node, p);
	rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
	return NULL;
}

#define UPDATE_NEW	0
#define UPDATE_OLD	1
/*
 * Walk all of the roots that point to the bytenr and adjust their refcnts.
 */
static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
				struct ulist *roots, struct ulist *tmp,
				struct ulist *qgroups, u64 seq, int update_old)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct ulist_node *tmp_unode;
	struct ulist_iterator tmp_uiter;
	struct btrfs_qgroup *qg;
	int ret = 0;

	if (!roots)
		return 0;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(roots, &uiter))) {
		qg = find_qgroup_rb(fs_info, unode->val);
		if (!qg)
			continue;

		ulist_reinit(tmp);
		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
				GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
		if (ret < 0)
			return ret;
		ULIST_ITER_INIT(&tmp_uiter);
		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
			struct btrfs_qgroup_list *glist;

			qg = u64_to_ptr(tmp_unode->aux);
			if (update_old)
				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
			else
				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
			list_for_each_entry(glist, &qg->groups, next_group) {
				ret = ulist_add(qgroups, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
				ret = ulist_add(tmp, glist->group->qgroupid,
						ptr_to_u64(glist->group),
						GFP_ATOMIC);
				if (ret < 0)
					return ret;
			}
		}
	}
	return 0;
}

/*
 * Update qgroup rfer/excl counters.
 * Rfer update is easy, the code can explain itself.
 *
 * Excl update is tricky, the update is split into 2 parts.
 * Part 1: Possible exclusive <-> sharing detect:
 *	|	A	|	!A	|
 *  -------------------------------------
 *  B	|	*	|	-	|
 *  -------------------------------------
 *  !B	|	+	|	**	|
 *  -------------------------------------
 *
 * Conditions:
 * A:	cur_old_roots < nr_old_roots	(not exclusive before)
 * !A:	cur_old_roots == nr_old_roots	(possible exclusive before)
 * B:	cur_new_roots < nr_new_roots	(not exclusive now)
 * !B:	cur_new_roots == nr_new_roots	(possible exclusive now)
 *
 * Results:
 * +: Possible sharing -> exclusive	-: Possible exclusive -> sharing
 * *: Definitely not changed.		**: Possible unchanged.
 *
 * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
 *
 * To make the logic clear, we first use condition A and B to split
 * combination into 4 results.
 *
 * Then, for result "+" and "-", check the old/new_roots == 0 case, as in
 * them only one variant may be 0.
 *
 * Lastly, check result **, since there are 2 variants that may be 0, split
 * them again (2x2).
 * But this time we don't need to consider other things, the code and logic
 * are easy to understand now.
 */
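/*
 * Worked example (illustrative): an extent of num_bytes was referenced
 * only by root A (nr_old_roots == 1) and is now also referenced by a new
 * snapshot B (nr_new_roots == 2).  For A's qgroup, cur_old_count == 1 and
 * cur_new_count == 1, so the rfer part is untouched (it still references
 * the extent), but cur_old_count == nr_old_roots && cur_new_count <
 * nr_new_roots matches the "exclusive -> shared" case below and excl is
 * decreased by num_bytes.
 */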
static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
				  struct ulist *qgroups,
				  u64 nr_old_roots,
				  u64 nr_new_roots,
				  u64 num_bytes, u64 seq)
{
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	struct btrfs_qgroup *qg;
	u64 cur_new_count, cur_old_count;

	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(qgroups, &uiter))) {
		bool dirty = false;

		qg = u64_to_ptr(unode->aux);
		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);

		trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
					     cur_new_count);

		/* Rfer update part */
		if (cur_old_count == 0 && cur_new_count > 0) {
			qg->rfer += num_bytes;
			qg->rfer_cmpr += num_bytes;
			dirty = true;
		}
		if (cur_old_count > 0 && cur_new_count == 0) {
			qg->rfer -= num_bytes;
			qg->rfer_cmpr -= num_bytes;
			dirty = true;
		}

		/* Excl update part */
		/* Exclusive/none -> shared case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count < nr_new_roots) {
			/* Exclusive -> shared */
			if (cur_old_count != 0) {
				qg->excl -= num_bytes;
				qg->excl_cmpr -= num_bytes;
				dirty = true;
			}
		}

		/* Shared -> exclusive/none case */
		if (cur_old_count < nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			/* Shared->exclusive */
			if (cur_new_count != 0) {
				qg->excl += num_bytes;
				qg->excl_cmpr += num_bytes;
				dirty = true;
			}
		}

		/* Exclusive/none -> exclusive/none case */
		if (cur_old_count == nr_old_roots &&
		    cur_new_count == nr_new_roots) {
			if (cur_old_count == 0) {
				/* None -> exclusive/none */
				if (cur_new_count != 0) {
					/* None -> exclusive */
					qg->excl += num_bytes;
					qg->excl_cmpr += num_bytes;
					dirty = true;
				}
				/* None -> none, nothing changed */
			} else {
				/* Exclusive -> exclusive/none */
				if (cur_new_count == 0) {
					/* Exclusive -> none */
					qg->excl -= num_bytes;
					qg->excl_cmpr -= num_bytes;
					dirty = true;
				}
				/* Exclusive -> exclusive, nothing changed */
			}
		}
		if (dirty)
			qgroup_dirty(fs_info, qg);
	}
	return 0;
}

int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info,
			    u64 bytenr, u64 num_bytes,
			    struct ulist *old_roots, struct ulist *new_roots)
{
	struct ulist *qgroups = NULL;
	struct ulist *tmp = NULL;
	u64 seq;
	u64 nr_new_roots = 0;
	u64 nr_old_roots = 0;
	int ret = 0;

	if (new_roots)
		nr_new_roots = new_roots->nnodes;
	if (old_roots)
		nr_old_roots = old_roots->nnodes;

	if (!fs_info->quota_enabled)
		goto out_free;
	BUG_ON(!fs_info->quota_root);

	trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
					  nr_new_roots);

	qgroups = ulist_alloc(GFP_NOFS);
	if (!qgroups) {
		ret = -ENOMEM;
		goto out_free;
	}
	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out_free;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
		if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			ret = 0;
			goto out_free;
		}
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	spin_lock(&fs_info->qgroup_lock);
	seq = fs_info->qgroup_seq;

	/* Update old refcnts using old_roots */
	ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
				   UPDATE_OLD);
	if (ret < 0)
		goto out;

	/* Update new refcnts using new_roots */
	ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
				   UPDATE_NEW);
	if (ret < 0)
		goto out;

	qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
			       num_bytes, seq);

	/*
	 * Bump qgroup_seq to avoid seq overlap
	 */
	fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
out:
	spin_unlock(&fs_info->qgroup_lock);
out_free:
	ulist_free(tmp);
	ulist_free(qgroups);
	ulist_free(old_roots);
	ulist_free(new_roots);
	return ret;
}

int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info)
{
	struct btrfs_qgroup_extent_record *record;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct ulist *new_roots = NULL;
	struct rb_node *node;
	u64 qgroup_to_skip;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	qgroup_to_skip = delayed_refs->qgroup_to_skip;
	while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
		record = rb_entry(node, struct btrfs_qgroup_extent_record,
				  node);

		trace_btrfs_qgroup_account_extents(record);

		if (!ret) {
			/*
			 * Use (u64)-1 as time_seq to do a special search,
			 * which doesn't lock the tree or delayed_refs and
			 * searches the current root. It's safe inside
			 * commit_transaction().
			 */
			ret = btrfs_find_all_roots(trans, fs_info,
					record->bytenr, (u64)-1, &new_roots);
			if (ret < 0)
				goto cleanup;
			if (qgroup_to_skip)
				ulist_del(new_roots, qgroup_to_skip, 0);
			ret = btrfs_qgroup_account_extent(trans, fs_info,
					record->bytenr, record->num_bytes,
					record->old_roots, new_roots);
			record->old_roots = NULL;
			new_roots = NULL;
		}
cleanup:
		ulist_free(record->old_roots);
		ulist_free(new_roots);
		new_roots = NULL;
		rb_erase(node, &delayed_refs->dirty_extent_root);
		kfree(record);
	}
	return ret;
}

/*
 * called from commit_transaction. Writes all changed qgroups to disk.
 */
int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
		      struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root = fs_info->quota_root;
	int ret = 0;
	int start_rescan_worker = 0;

	if (!quota_root)
		goto out;

	if (!fs_info->quota_enabled && fs_info->pending_quota_state)
		start_rescan_worker = 1;

	fs_info->quota_enabled = fs_info->pending_quota_state;

	spin_lock(&fs_info->qgroup_lock);
	while (!list_empty(&fs_info->dirty_qgroups)) {
		struct btrfs_qgroup *qgroup;

		qgroup = list_first_entry(&fs_info->dirty_qgroups,
					  struct btrfs_qgroup, dirty);
		list_del_init(&qgroup->dirty);
		spin_unlock(&fs_info->qgroup_lock);
		ret = update_qgroup_info_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		ret = update_qgroup_limit_item(trans, quota_root, qgroup);
		if (ret)
			fs_info->qgroup_flags |=
					BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		spin_lock(&fs_info->qgroup_lock);
	}
	if (fs_info->quota_enabled)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
	else
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	ret = update_qgroup_status_item(trans, fs_info, quota_root);
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;

	if (!ret && start_rescan_worker) {
		ret = qgroup_rescan_init(fs_info, 0, 1);
		if (!ret) {
			qgroup_rescan_zero_tracking(fs_info);
			btrfs_queue_work(fs_info->qgroup_rescan_workers,
					 &fs_info->qgroup_rescan_work);
		}
		ret = 0;
	}

out:
	return ret;
}

/*
 * Copy the accounting information between qgroups. This is necessary
 * when a snapshot or a subvolume is created. Throwing an error will
 * cause a transaction abort so we take extra care here to only error
 * when a readonly fs is a reasonable outcome.
 */
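/*
 * Layout note (added for clarity, inferred from the code below): the
 * variable-length u64 array that follows struct btrfs_qgroup_inherit holds
 * inherit->num_qgroups qgroup ids first, then num_ref_copies (src, dst)
 * pairs for rfer copies, then num_excl_copies (src, dst) pairs for excl
 * copies; hence nums = num_qgroups + 2 * num_ref_copies +
 * 2 * num_excl_copies in the validation loop.
 */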
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid,
			 struct btrfs_qgroup_inherit *inherit)
{
	int ret = 0;
	int i;
	u64 *i_qgroups;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_qgroup *srcgroup;
	struct btrfs_qgroup *dstgroup;
	u32 level_size = 0;
	u64 nums;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_enabled)
		goto out;

	if (!quota_root) {
		ret = -EINVAL;
		goto out;
	}

	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
		       2 * inherit->num_excl_copies;
		for (i = 0; i < nums; ++i) {
			srcgroup = find_qgroup_rb(fs_info, *i_qgroups);

			/*
			 * Zero out invalid groups so we can ignore
			 * them later.
			 */
			if (!srcgroup ||
			    ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
				*i_qgroups = 0ULL;

			++i_qgroups;
		}
	}

	/*
	 * create a tracking group for the subvol itself
	 */
	ret = add_qgroup_item(trans, quota_root, objectid);
	if (ret)
		goto out;

	if (srcid) {
		struct btrfs_root *srcroot;
		struct btrfs_key srckey;

		srckey.objectid = srcid;
		srckey.type = BTRFS_ROOT_ITEM_KEY;
		srckey.offset = (u64)-1;
		srcroot = btrfs_read_fs_root_no_name(fs_info, &srckey);
		if (IS_ERR(srcroot)) {
			ret = PTR_ERR(srcroot);
			goto out;
		}

		rcu_read_lock();
		level_size = srcroot->nodesize;
		rcu_read_unlock();
	}

	/*
	 * add qgroup to all inherited groups
	 */
	if (inherit) {
		i_qgroups = (u64 *)(inherit + 1);
		for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
			if (*i_qgroups == 0)
				continue;
			ret = add_qgroup_relation_item(trans, quota_root,
						       objectid, *i_qgroups);
			if (ret && ret != -EEXIST)
				goto out;
			ret = add_qgroup_relation_item(trans, quota_root,
						       *i_qgroups, objectid);
			if (ret && ret != -EEXIST)
				goto out;
		}
		ret = 0;
	}

	spin_lock(&fs_info->qgroup_lock);

	dstgroup = add_qgroup_rb(fs_info, objectid);
	if (IS_ERR(dstgroup)) {
		ret = PTR_ERR(dstgroup);
		goto unlock;
	}

	if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
		dstgroup->lim_flags = inherit->lim.flags;
		dstgroup->max_rfer = inherit->lim.max_rfer;
		dstgroup->max_excl = inherit->lim.max_excl;
		dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
		dstgroup->rsv_excl = inherit->lim.rsv_excl;

		ret = update_qgroup_limit_item(trans, quota_root, dstgroup);
		if (ret) {
			fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
			btrfs_info(fs_info, "unable to update quota limit for %llu",
				   dstgroup->qgroupid);
			goto unlock;
		}
	}

	if (srcid) {
		srcgroup = find_qgroup_rb(fs_info, srcid);
		if (!srcgroup)
			goto unlock;

		/*
		 * We call inherit after we clone the root in order to make sure
		 * our counts don't go crazy, so at this point the only
		 * difference between the two roots should be the root node.
		 */
		dstgroup->rfer = srcgroup->rfer;
		dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
		dstgroup->excl = level_size;
		dstgroup->excl_cmpr = level_size;
		srcgroup->excl = level_size;
		srcgroup->excl_cmpr = level_size;

		/* inherit the limit info */
		dstgroup->lim_flags = srcgroup->lim_flags;
		dstgroup->max_rfer = srcgroup->max_rfer;
		dstgroup->max_excl = srcgroup->max_excl;
		dstgroup->rsv_rfer = srcgroup->rsv_rfer;
		dstgroup->rsv_excl = srcgroup->rsv_excl;

		qgroup_dirty(fs_info, dstgroup);
		qgroup_dirty(fs_info, srcgroup);
	}

	if (!inherit)
		goto unlock;

	i_qgroups = (u64 *)(inherit + 1);
	for (i = 0; i < inherit->num_qgroups; ++i) {
		if (*i_qgroups) {
			ret = add_relation_rb(quota_root->fs_info, objectid,
					      *i_qgroups);
			if (ret)
				goto unlock;
		}
		++i_qgroups;
	}

	for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->rfer = src->rfer - level_size;
		dst->rfer_cmpr = src->rfer_cmpr - level_size;
	}
	for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
		struct btrfs_qgroup *src;
		struct btrfs_qgroup *dst;

		if (!i_qgroups[0] || !i_qgroups[1])
			continue;

		src = find_qgroup_rb(fs_info, i_qgroups[0]);
		dst = find_qgroup_rb(fs_info, i_qgroups[1]);

		if (!src || !dst) {
			ret = -EINVAL;
			goto unlock;
		}

		dst->excl = src->excl + level_size;
		dst->excl_cmpr = src->excl_cmpr + level_size;
	}

unlock:
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 ref_root = root->root_key.objectid;
	int ret = 0;
	struct ulist_node *unode;
	struct ulist_iterator uiter;

	if (!is_fstree(ref_root))
		return 0;

	if (num_bytes == 0)
		return 0;

	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	/*
	 * In the first step, walk all affected qgroups and check whether
	 * any limit would be exceeded.
	 */
	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
		    qg->reserved + (s64)qg->rfer + num_bytes >
		    qg->max_rfer) {
			ret = -EDQUOT;
			goto out;
		}

		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
		    qg->reserved + (s64)qg->excl + num_bytes >
		    qg->max_excl) {
			ret = -EDQUOT;
			goto out;
		}

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
	/*
	 * No limits exceeded, now record the reservation into all qgroups.
	 */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;

		qg = u64_to_ptr(unode->aux);

		qg->reserved += num_bytes;
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
	return ret;
}

void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
			       u64 ref_root, u64 num_bytes)
{
	struct btrfs_root *quota_root;
	struct btrfs_qgroup *qgroup;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret = 0;

	if (!is_fstree(ref_root))
		return;

	if (num_bytes == 0)
		return;

	spin_lock(&fs_info->qgroup_lock);

	quota_root = fs_info->quota_root;
	if (!quota_root)
		goto out;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	ulist_reinit(fs_info->qgroup_ulist);
	ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
			(uintptr_t)qgroup, GFP_ATOMIC);
	if (ret < 0)
		goto out;
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
		struct btrfs_qgroup *qg;
		struct btrfs_qgroup_list *glist;

		qg = u64_to_ptr(unode->aux);

		qg->reserved -= num_bytes;

		list_for_each_entry(glist, &qg->groups, next_group) {
			ret = ulist_add(fs_info->qgroup_ulist,
					glist->group->qgroupid,
					(uintptr_t)glist->group, GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}

out:
	spin_unlock(&fs_info->qgroup_lock);
}

static inline void qgroup_free(struct btrfs_root *root, u64 num_bytes)
{
	return btrfs_qgroup_free_refroot(root->fs_info, root->objectid,
					 num_bytes);
}

void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
		return;
	btrfs_err(trans->root->fs_info,
		"qgroups not uptodate in trans handle %p: list is%s empty, "
		"seq is %#x.%x",
		trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
		(u32)(trans->delayed_ref_elem.seq >> 32),
		(u32)trans->delayed_ref_elem.seq);
	BUG();
}

/*
 * Returns < 0 on error, 0 when more leaves are to be scanned.
 * Returns 1 when done.
 */
static int
qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		   struct btrfs_trans_handle *trans)
{
	struct btrfs_key found;
	struct extent_buffer *scratch_leaf = NULL;
	struct ulist *roots = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	u64 num_bytes;
	int slot;
	int ret;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	ret = btrfs_search_slot_for_read(fs_info->extent_root,
					 &fs_info->qgroup_rescan_progress,
					 path, 1, 0);

	pr_debug("current progress key (%llu %u %llu), search_slot ret %d\n",
		 fs_info->qgroup_rescan_progress.objectid,
		 fs_info->qgroup_rescan_progress.type,
		 fs_info->qgroup_rescan_progress.offset, ret);

	if (ret) {
		/*
		 * The rescan is about to end, we will not be scanning any
		 * further blocks. We cannot unset the RESCAN flag here, because
		 * we want to commit the transaction if everything went well.
		 * To make the live accounting work in this phase, we set our
		 * scan progress pointer such that every real extent objectid
		 * will be smaller.
		 */
		fs_info->qgroup_rescan_progress.objectid = (u64)-1;
		btrfs_release_path(path);
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		return ret;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &found,
			      btrfs_header_nritems(path->nodes[0]) - 1);
	fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;

	btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
	if (!scratch_leaf) {
		ret = -ENOMEM;
		mutex_unlock(&fs_info->qgroup_rescan_lock);
		goto out;
	}
	extent_buffer_get(scratch_leaf);
	btrfs_tree_read_lock(scratch_leaf);
	btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
	slot = path->slots[0];
	btrfs_release_path(path);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
		btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
		if (found.type != BTRFS_EXTENT_ITEM_KEY &&
		    found.type != BTRFS_METADATA_ITEM_KEY)
			continue;
		if (found.type == BTRFS_METADATA_ITEM_KEY)
			num_bytes = fs_info->extent_root->nodesize;
		else
			num_bytes = found.offset;

		ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
					   &roots);
		if (ret < 0)
			goto out;
		/* For rescan, just pass old_roots as NULL */
		ret = btrfs_qgroup_account_extent(trans, fs_info,
				found.objectid, num_bytes, NULL, roots);
		if (ret < 0)
			goto out;
	}
out:
	if (scratch_leaf) {
		btrfs_tree_read_unlock_blocking(scratch_leaf);
		free_extent_buffer(scratch_leaf);
	}
	btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);

	return ret;
}

static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
						     qgroup_rescan_work);
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans = NULL;
	int err = -ENOMEM;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	err = 0;
	while (!err && !btrfs_fs_closing(fs_info)) {
		trans = btrfs_start_transaction(fs_info->fs_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			break;
		}
		if (!fs_info->quota_enabled) {
			err = -EINTR;
		} else {
			err = qgroup_rescan_leaf(fs_info, path, trans);
		}
		if (err > 0)
			btrfs_commit_transaction(trans, fs_info->fs_root);
		else
			btrfs_end_transaction(trans, fs_info->fs_root);
	}

out:
	btrfs_free_path(path);

	mutex_lock(&fs_info->qgroup_rescan_lock);
	if (!btrfs_fs_closing(fs_info))
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

	if (err > 0 &&
	    fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	} else if (err < 0) {
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	}
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	/*
	 * only update status, since the previous part has already updated the
	 * qgroup info.
	 */
	trans = btrfs_start_transaction(fs_info->quota_root, 1);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		btrfs_err(fs_info,
			  "fail to start transaction for status update: %d\n",
			  err);
		goto done;
	}
	ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root);
	if (ret < 0) {
		err = ret;
		btrfs_err(fs_info, "fail to update qgroup status: %d\n", err);
	}
	btrfs_end_transaction(trans, fs_info->quota_root);

	if (btrfs_fs_closing(fs_info)) {
		btrfs_info(fs_info, "qgroup scan paused");
	} else if (err >= 0) {
		btrfs_info(fs_info, "qgroup scan completed%s",
			err > 0 ? " (inconsistency flag cleared)" : "");
	} else {
		btrfs_err(fs_info, "qgroup scan failed with %d", err);
	}

done:
	complete_all(&fs_info->qgroup_rescan_completion);
}

/*
 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
 * memory required for the rescan context.
 */
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags)
{
	int ret = 0;

	if (!init_flags &&
	    (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
	     !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
		ret = -EINVAL;
		goto err;
	}

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);

	if (init_flags) {
		if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
			ret = -EINPROGRESS;
		else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
			ret = -EINVAL;

		if (ret) {
			spin_unlock(&fs_info->qgroup_lock);
			mutex_unlock(&fs_info->qgroup_rescan_lock);
			goto err;
		}
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	memset(&fs_info->qgroup_rescan_progress, 0,
		sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);

	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	memset(&fs_info->qgroup_rescan_work, 0,
	       sizeof(fs_info->qgroup_rescan_work));
	btrfs_init_work(&fs_info->qgroup_rescan_work,
			btrfs_qgroup_rescan_helper,
			btrfs_qgroup_rescan_worker, NULL, NULL);

	if (ret) {
err:
		btrfs_info(fs_info, "qgroup_rescan_init failed with %d", ret);
		return ret;
	}

	return 0;
}

static void
qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	spin_lock(&fs_info->qgroup_lock);
	/* clear all current qgroup tracking information */
	for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		qgroup->rfer = 0;
		qgroup->rfer_cmpr = 0;
		qgroup->excl = 0;
		qgroup->excl_cmpr = 0;
	}
	spin_unlock(&fs_info->qgroup_lock);
}

int
btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
{
	int ret = 0;
	struct btrfs_trans_handle *trans;

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (ret)
		return ret;

	/*
	 * We have set the rescan_progress to 0, which means no more
	 * delayed refs will be accounted by btrfs_qgroup_account_ref.
	 * However, btrfs_qgroup_account_ref may be right after its call
	 * to btrfs_find_all_roots, in which case it would still do the
	 * accounting.
	 * To solve this, we're committing the transaction, which will
	 * ensure we run all delayed refs and only after that, we are
	 * going to clear all tracking information for a clean start.
	 */

	trans = btrfs_join_transaction(fs_info->fs_root);
	if (IS_ERR(trans)) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return PTR_ERR(trans);
	}
	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
	if (ret) {
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		return ret;
	}

	qgroup_rescan_zero_tracking(fs_info);

	btrfs_queue_work(fs_info->qgroup_rescan_workers,
			 &fs_info->qgroup_rescan_work);

	return 0;
}

int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
{
	int running;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_rescan_lock);
	spin_lock(&fs_info->qgroup_lock);
	running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);

	if (running)
		ret = wait_for_completion_interruptible(
					&fs_info->qgroup_rescan_completion);

	return ret;
}

/*
 * this is only called from open_ctree where we're still single threaded, thus
 * locking is omitted here.
 */
void
btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
{
	if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
}

/*
 * Reserve qgroup space for range [start, start + len).
 *
 * This function will either reserve space from the related qgroups or do
 * nothing if the range is already reserved.
 *
 * Return 0 for a successful reservation.
 * Return <0 for an error (including -EDQUOT).
 *
 * NOTE: this function may sleep for memory allocation.
 */
int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	int ret;

	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
	    len == 0)
		return 0;

	changeset.bytes_changed = 0;
	changeset.range_changed = ulist_alloc(GFP_NOFS);
	/* Bail out early if the changeset ulist could not be allocated */
	if (!changeset.range_changed)
		return -ENOMEM;

	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
	trace_btrfs_qgroup_reserve_data(inode, start, len,
					changeset.bytes_changed,
					QGROUP_RESERVE);
	if (ret < 0)
		goto cleanup;
	ret = qgroup_reserve(root, changeset.bytes_changed);
	if (ret < 0)
		goto cleanup;

	ulist_free(changeset.range_changed);

	return ret;

cleanup:
	/* cleanup already reserved ranges */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(changeset.range_changed, &uiter)))
		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
				 GFP_NOFS);
	ulist_free(changeset.range_changed);
	return ret;
}

static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
				       int free)
{
	struct extent_changeset changeset;
	int trace_op = QGROUP_RELEASE;
	int ret;

	changeset.bytes_changed = 0;
	changeset.range_changed = ulist_alloc(GFP_NOFS);
	if (!changeset.range_changed)
		return -ENOMEM;

	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
			start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
	if (ret < 0)
		goto out;

	if (free) {
		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
		trace_op = QGROUP_FREE;
	}
	trace_btrfs_qgroup_release_data(inode, start, len,
					changeset.bytes_changed, trace_op);

out:
	ulist_free(changeset.range_changed);
	return ret;
}

/*
 * Free a reserved space range from the io_tree and the related qgroups.
 *
 * Should be called when a range of pages gets invalidated before reaching
 * disk, or for the error cleanup case.
 *
 * For data written to disk, use btrfs_qgroup_release_data().
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_free_data(struct inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, start, len, 1);
}

/*
 * Release a reserved space range from the io_tree only.
 *
 * Should be called when a range of pages gets written to disk and the
 * corresponding FILE_EXTENT item is inserted into the corresponding root.
 *
 * Since the new qgroup accounting framework only updates qgroup numbers at
 * commit_transaction() time, the reserved space shouldn't be freed from the
 * related qgroups here.
 *
 * But we should release the range from the io_tree, to allow further writes
 * to be COWed.
 *
 * NOTE: This function may sleep for memory allocation.
 */
int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
{
	return __btrfs_qgroup_release_data(inode, start, len, 0);
}
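
/*
 * Illustrative caller-side sketch (added for clarity, not part of the
 * original file): the typical data reservation lifecycle pairs the three
 * helpers above. The surrounding context (inode, pos, count, the write
 * helper) is hypothetical and only shows the expected pairing:
 *
 *	ret = btrfs_qgroup_reserve_data(inode, pos, count);
 *	if (ret < 0)
 *		return ret;			// over quota or -ENOMEM
 *	ret = do_buffered_write(inode, pos, count);	// hypothetical
 *	if (ret < 0)
 *		// error before the data reached disk: return the bytes to
 *		// the qgroups and clear the io_tree range
 *		btrfs_qgroup_free_data(inode, pos, count);
 *	// on success, only the io_tree range is released once the file
 *	// extent item has been inserted:
 *	//	btrfs_qgroup_release_data(inode, pos, count);
 */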

int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
{
	int ret;

	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid) ||
	    num_bytes == 0)
		return 0;

	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
	ret = qgroup_reserve(root, num_bytes);
	if (ret < 0)
		return ret;
	atomic_add(num_bytes, &root->qgroup_meta_rsv);
	return ret;
}

void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{
	int reserved;

	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
		return;

	reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
	if (reserved == 0)
		return;
	qgroup_free(root, reserved);
}

void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
{
	if (!root->fs_info->quota_enabled || !is_fstree(root->objectid))
		return;

	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
	WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
	atomic_sub(num_bytes, &root->qgroup_meta_rsv);
	qgroup_free(root, num_bytes);
}
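
/*
 * Illustrative pairing sketch (added for clarity, not part of the original
 * file): metadata reservations are made in node-size multiples, tracked in
 * root->qgroup_meta_rsv, and either returned individually or dropped in
 * bulk. The caller and nr_items below are hypothetical:
 *
 *	ret = btrfs_qgroup_reserve_meta(root, nr_items * root->nodesize);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	// when this particular metadata reservation is no longer needed:
 *	btrfs_qgroup_free_meta(root, nr_items * root->nodesize);
 *	// or drop whatever the root still has tracked, wholesale:
 *	//	btrfs_qgroup_free_meta_all(root);
 */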

/*
 * Check for leaked qgroup reserved space, normally at inode destroy time.
 */
void btrfs_qgroup_check_reserved_leak(struct inode *inode)
{
	struct extent_changeset changeset;
	struct ulist_node *unode;
	struct ulist_iterator iter;
	int ret;

	changeset.bytes_changed = 0;
	changeset.range_changed = ulist_alloc(GFP_NOFS);
	if (WARN_ON(!changeset.range_changed))
		return;

	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
			EXTENT_QGROUP_RESERVED, &changeset);

	WARN_ON(ret < 0);
	if (WARN_ON(changeset.bytes_changed)) {
		ULIST_ITER_INIT(&iter);
		while ((unode = ulist_next(changeset.range_changed, &iter))) {
			btrfs_warn(BTRFS_I(inode)->root->fs_info,
				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
				inode->i_ino, unode->val, unode->aux);
		}
		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
	}
	ulist_free(changeset.range_changed);
}