xfs_qm.c

/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
#include "xfs_dinode.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted.  */
		if (last_error == EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that the root inode, et al, might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
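
/*
 * Decide whether an inode needs dquots attached: quotas must be running
 * and enabled, the inode must not already have all applicable dquots,
 * and the quota inodes themselves are never accounted.
 */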
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}
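
/*
 * Lock the inode exclusively, attach its dquots, and unlock it again.
 */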
int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}
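
/*
 * Per-walk context for dquot reclaim: buffers queued for delayed-write
 * submission and dquots pulled off the LRU for disposal.
 */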
struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};

static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;

	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_del_init(&dqp->q_lru);
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return LRU_REMOVED;
	}

	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(xs_qm_dquot_unused);

	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return LRU_REMOVED;

out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	spin_lock(lru_lock);
	return LRU_RETRY;
}
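
/*
 * Shrinker callback: walk this node's dquot LRU under memory pressure,
 * writing back dirty dquots and freeing clean, unreferenced ones.
 */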
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;
	unsigned long		nr_to_scan = sc->nr_to_scan;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);

	freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
					&nr_to_scan);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

	return freed;
}
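
/*
 * Shrinker callback: report how many reclaimable dquots sit on this
 * node's LRU so the VM can size its scan.
 */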
static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_count_node(&qi->qi_lru, sc->nid);
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	error = -list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we go to the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before they can no longer write.
		 * If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
	return 0;

out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	*ip = NULL;
	/*
	 * With a superblock that doesn't have a separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;

		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
		}
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	if (!*ip) {
		error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
								&committed);
		if (error) {
			xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
					     XFS_TRANS_ABORT);
			return error;
		}
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
				(XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
				 XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;

		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}
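
/*
 * Zero the counters, timers and warning counts of every dquot in this
 * buffer so quotacheck can rebuild them from observed usage.
 */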
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];

		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
		 */
		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
			    "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;

		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
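
/*
 * Read each dquot buffer in the given range of filesystem blocks, reset
 * the dquots it holds, and queue the buffer for delayed write.
 */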
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);

		/*
		 * CRC and validation errors will return an EFSCORRUPTED here.
		 * If this occurs, re-read without CRC validation so that we
		 * can repair the damage via xfs_qm_reset_dqcounts(). This
		 * process will leave a trace in the log indicating corruption
		 * has been detected.
		 */
		if (error == EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}

		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);

		/* go to the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       NULL);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode and a dquot id, this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
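
/*
 * Count the realtime blocks allocated to this inode by walking the
 * extent records in its data fork.
 */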
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}
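
/*
 * Flush one dirty dquot to its backing buffer and queue that buffer for
 * delayed write; used via xfs_qm_dquot_walk once quotacheck completes.
 */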
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}

	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore.  Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return (error);
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	__int64_t		sbflags = 0;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return XFS_ERROR(error);
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
			    XFS_SB_QFLAGS);
	}

	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  sbflags | XFS_SB_PQUOTINO,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;

error_rele:
	if (uip)
		IRELE(uip);
	if (gip)
		IRELE(gip);
	if (pip)
		IRELE(pip);
	return XFS_ERROR(error);
}
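
/*
 * Remove a dquot from the radix tree and free it; the dquot has already
 * been isolated from the LRU by the shrinker.
 */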
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

/*
 * Start a transaction and write the incore superblock changes to
 * disk. The flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}
  1415. /* --------------- utility functions for vnodeops ---------------- */
  1416. /*
  1417. * Given an inode, a uid, gid and prid make sure that we have
  1418. * allocated relevant dquot(s) on disk, and that we won't exceed inode
  1419. * quotas by creating this file.
  1420. * This also attaches dquot(s) to the given inode after locking it,
  1421. * and returns the dquots corresponding to the uid and/or gid.
  1422. *
  1423. * in : inode (unlocked)
  1424. * out : udquot, gdquot with references taken and unlocked
  1425. */
  1426. int
  1427. xfs_qm_vop_dqalloc(
  1428. struct xfs_inode *ip,
  1429. xfs_dqid_t uid,
  1430. xfs_dqid_t gid,
  1431. prid_t prid,
  1432. uint flags,
  1433. struct xfs_dquot **O_udqpp,
  1434. struct xfs_dquot **O_gdqpp,
  1435. struct xfs_dquot **O_pdqpp)
  1436. {
  1437. struct xfs_mount *mp = ip->i_mount;
  1438. struct xfs_dquot *uq = NULL;
  1439. struct xfs_dquot *gq = NULL;
  1440. struct xfs_dquot *pq = NULL;
  1441. int error;
  1442. uint lockflags;
  1443. if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
  1444. return 0;
  1445. lockflags = XFS_ILOCK_EXCL;
  1446. xfs_ilock(ip, lockflags);
  1447. if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
  1448. gid = ip->i_d.di_gid;
  1449. /*
  1450. * Attach the dquot(s) to this inode, doing a dquot allocation
  1451. * if necessary. The dquot(s) will not be locked.
  1452. */
  1453. if (XFS_NOT_DQATTACHED(mp, ip)) {
  1454. error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
  1455. if (error) {
  1456. xfs_iunlock(ip, lockflags);
  1457. return error;
  1458. }
  1459. }
  1460. if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
  1461. if (ip->i_d.di_uid != uid) {
  1462. /*
  1463. * What we need is the dquot that has this uid, and
  1464. * if we send the inode to dqget, the uid of the inode
  1465. * takes priority over what's sent in the uid argument.
  1466. * We must unlock inode here before calling dqget if
  1467. * we're not sending the inode, because otherwise
  1468. * we'll deadlock by doing trans_reserve while
  1469. * holding ilock.
  1470. */
  1471. xfs_iunlock(ip, lockflags);
  1472. error = xfs_qm_dqget(mp, NULL, uid,
  1473. XFS_DQ_USER,
  1474. XFS_QMOPT_DQALLOC |
  1475. XFS_QMOPT_DOWARN,
  1476. &uq);
  1477. if (error) {
  1478. ASSERT(error != ENOENT);
  1479. return error;
  1480. }
  1481. /*
  1482. * Get the ilock in the right order.
  1483. */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	if (O_pdqpp)
		*O_pdqpp = pq;
	else if (pq)
		xfs_qm_dqrele(pq);
	return 0;

error_rele:
	if (gq)
		xfs_qm_dqrele(gq);
	if (uq)
		xfs_qm_dqrele(uq);
	return error;
}
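
/*
 * Illustrative caller sketch (hedged; condensed from how the create path
 * invokes this helper, not part of the original file):
 *
 *	error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
 *				   xfs_kgid_to_gid(current_fsgid()), prid,
 *				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *				   &udqp, &gdqp, &pdqp);
 *
 * The dquots come back unlocked with references held; the caller drops
 * them with xfs_qm_dqrele() once the operation commits.
 */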
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
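
/*
 * Sketch of where this sits in a chown (hedged; condensed from the
 * setattr path, local names are illustrative): the caller reserves via
 * xfs_qm_vop_chown_reserve() first, then swaps the inode's dquot pointer
 * and releases whatever it pointed at before:
 *
 *	olddquot = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);
 *	...
 *	xfs_trans_commit(tp, 0);
 *	xfs_qm_dqrele(olddquot);
 */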
/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}
	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}

	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;
	/*
	 * Do the delayed blks reservations/unreservations now. Since these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by the
	 * transaction code. So we have to undo them manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
		ASSERT(udq_unres || gdq_unres || pdq_unres);
		error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
			    udq_delblks, gdq_delblks, pdq_delblks,
			    (xfs_qcnt_t)delblks, 0,
			    flags | blkflags | prjflags);
		if (error)
			return error;
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}
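
/*
 * Worked example (illustrative numbers, not from the original code): if
 * the inode owns 100 on-disk blocks and has 20 delayed-allocation blocks
 * outstanding, the transactional reservation moves 100 blocks plus one
 * inode onto the new dquots, and the transactionless delblks pass then
 * reserves the 20 delalloc blocks against the new dquots before backing
 * the same 20 out of the old ones.
 */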
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
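		/*
		 * Note (hedged, based on how the rename path builds this
		 * table): at most four inodes show up here -- the two parent
		 * directories and up to two victim inodes -- sorted so that
		 * duplicates sit next to each other, which is why comparing
		 * against i_tab[i-1] is enough to skip them.
		 */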
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
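
/*
 * Illustrative create-path sequence (hedged; condensed from the way the
 * inode-creation code wires these helpers together, error handling
 * omitted):
 *
 *	xfs_qm_vop_dqalloc(dp, uid, gid, prid,
 *			   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *			   &udqp, &gdqp, &pdqp);
 *	... allocate the new inode in a transaction ...
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 *	xfs_trans_commit(tp, 0);
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */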