/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means that there is more contention between the nodes
 * for the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	(1 << GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
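
/*
 * Hash a (superblock, quota ID) pair into one of the GFS2_QD_HASH_SIZE
 * buckets by chaining two jhash rounds, one over the superblock pointer
 * and one over the kqid, then masking down to the table size.
 */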
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}
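
/*
 * Tear down each qd on the dispose list: unlink it from the per-sb list
 * and its hash bucket, drop its glock reference, and free the structure
 * via RCU once readers walking the hash chain can no longer see it.
 */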
static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}
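
/*
 * LRU isolate callback for the quota shrinker.  Entries still referenced
 * (lockref count != 0) are left alone; unreferenced entries are marked
 * dead and moved onto the caller's dispose list.  The trylock keeps the
 * documented lock order (qd->lockref.lock before lru lock) deadlock-free,
 * since the lru lock is already held here.
 */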
static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_move(&qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
				   &dispose, &sc->nr_to_scan);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
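
/*
 * The quota file interleaves user and group records: quota ID N lives at
 * index 2N for a user quota and 2N + 1 for a group quota, and each index
 * maps to a byte offset of index * sizeof(struct gfs2_quota).  For
 * example, the group quota for ID 5 sits at index 11.
 */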
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}
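
/*
 * Find or create the quota data for (sdp, qid).  The fast path is a
 * lockless RCU hash lookup; on a miss a new qd is allocated and the
 * bucket re-searched under qd_lock in case another thread inserted the
 * same ID in the meantime, in which case the new allocation is
 * discarded.
 */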
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
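
/*
 * Each active qd owns one slot in the per-node quota-change file.  The
 * slot allocator is a simple bitmap protected by sd_bitmap_lock, with a
 * per-qd reference count so repeated holders share the same slot.
 */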
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}
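
/*
 * Map a qd's slot number to the block of the quota-change inode that
 * holds its gfs2_quota_change record and read that block in, caching
 * the buffer and a pointer to the record for as long as qd_bh_count
 * stays elevated.
 */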
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}
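
/*
 * Fish the next quota data in need of syncing out of the per-sb list:
 * the first entry with a pending change that has not yet been synced in
 * the current generation.  On success the qd comes back locked
 * (QDF_LOCKED) with its change buffer held.
 */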
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
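
/*
 * Acquire quota data for every ID an operation on this inode can
 * affect: the inode's owner uid and gid always, plus a new uid and/or
 * gid when ownership is being changed.  Up to four qds end up in the
 * inode's reservation, each with a slot and change buffer held.
 */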
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
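
/*
 * Apply a signed block-count delta to the qd's record in the per-node
 * quota-change file, as part of the current transaction.  The record is
 * (re)initialized when the first change arrives, and released again
 * (slot and reference dropped) when the accumulated change returns to
 * zero.
 */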
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = &q;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_data(ip->i_gl, bh);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	set_bit(QDF_REFRESH, &qd->qd_flags);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
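
/*
 * Sync a batch of quota changes to the global quota file.  All of the
 * affected per-ID glocks are taken exclusively (in sorted order, so two
 * nodes syncing overlapping sets of IDs cannot deadlock), a single
 * transaction large enough for every record update is reserved, and
 * each local change is folded into the quota file and then backed out
 * of the per-node quota-change file via do_qc().
 */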
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;
out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl, NORMAL_FLUSH);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
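
/*
 * Take the per-ID quota glock and make sure the cached LVB copy of the
 * quota record is valid.  If the LVB has never been populated (bad
 * magic) or a refresh is forced, the lock is retaken exclusively, the
 * record is re-read from the quota file into the LVB, and the shared
 * acquisition is retried.
 */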
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
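
/*
 * Take shared glocks on all quota data held for this inode so that
 * usage checks see reasonably fresh values.  IDs flagged QDF_REFRESH
 * are force-refreshed from the quota file; on success GIF_QD_LOCKED is
 * set on the inode.
 */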
int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
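
/*
 * Decide whether a local quota change is large enough to warrant an
 * early sync: the local delta is scaled by the number of journals
 * (nodes) and by quota_scale, then added to the cached value; a sync is
 * triggered once that projection reaches the limit.  As an illustrative
 * example (numbers assumed): with a limit of 1000 blocks, a cached
 * value of 900, a local change of +20, four journals and quota_scale of
 * one, the projection is 900 + 20 * 4 = 980, so no sync yet; a local
 * change of +30 projects to 1020 and forces one.
 */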
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}
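
/*
 * Check the inode's held quota data against their limits, counting the
 * cached value plus any local unsynced change.  Exceeding the hard
 * limit logs a message, sends a netlink warning and returns -EDQUOT;
 * crossing the soft (warn) limit only warns, rate-limited by
 * quota_warn_period.
 */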
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}
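
/*
 * Sync all pending local quota changes to the quota file, processing
 * them in batches of up to max_qd entries per do_sync() call and
 * bumping the sync generation so each qd is written at most once per
 * pass.
 */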
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
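
/*
 * Rebuild the in-memory quota state at mount time: size the slot bitmap
 * from the quota-change inode, then walk every gfs2_quota_change record
 * in that inode and re-create a qd (with its slot marked in use) for
 * each entry whose accumulated change was never synced before the last
 * unmount or crash.
 */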
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO, PAGE_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}

	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync = gfs2_quota_sync,
	.get_xstate = gfs2_quota_get_xstate,
	.get_dqblk = gfs2_get_dqblk,
	.set_dqblk = gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}