/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes and infinite bandwidth) to twice the
 * user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT      12
#define GFS2_QD_HASH_SIZE       (1 << GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}
static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}

static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_move(&qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
				   &dispose, &sc->nr_to_scan);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
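
/*
 * The quota file interleaves user and group quotas: the entry for user ID N
 * lives at index 2N and the entry for group ID N at index 2N + 1, each entry
 * being sizeof(struct gfs2_quota) bytes.  qd2index() and qd2offset() below
 * encode that layout; the index also doubles as the quota glock number.
 */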
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}
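
/**
 * qd_get - Look up, or allocate and hash, the quota data for an ID
 * @sdp: The GFS2 superblock
 * @qid: The quota ID
 * @qdp: Receives the quota data structure
 *
 * Searches the hash bucket under RCU first; on a miss, allocates a new
 * entry and inserts it unless another thread raced us to it, in which
 * case the loser's allocation is freed again.
 */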
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
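
/*
 * Each cached quota change occupies one slot in the per-node quota change
 * file.  Slots are tracked in sd_quota_bitmap: slot_get() finds and claims
 * a free slot on first use, slot_hold() takes an extra reference to an
 * already-claimed slot, and slot_put() clears the bitmap bit once the last
 * reference is dropped.
 */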
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}
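
/**
 * bh_get - Read the quota change block backing a qd's slot
 * @qd: The quota data
 *
 * Maps the slot number to a block and offset within the per-node quota
 * change file, reads that block, and points qd_bh_qc at this ID's
 * on-disk gfs2_quota_change record.  Reference counted via qd_bh_count.
 */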
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}
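
/**
 * qd_fish - Find a dirty quota data object that needs syncing
 * @sdp: The GFS2 superblock
 * @qdp: Receives the quota data, or NULL if none is pending
 *
 * Scans sd_quota_list for an entry whose local change has not yet been
 * written back in this sync generation.  qd_check_sync() marks the entry
 * QDF_LOCKED and pins its slot; the buffer is then pinned here so that
 * do_sync() can write the change out.
 */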
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
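
/**
 * do_qc - Record a local quota change in the per-node quota change file
 * @qd: The quota data
 * @change: The change in blocks allocated (positive or negative)
 *
 * Adds @change to this ID's gfs2_quota_change record within the current
 * transaction.  When the accumulated change returns to zero, the slot and
 * qd references taken for it are dropped; when a record first becomes
 * nonzero, extra references are taken to keep it alive until it is synced.
 */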
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = &q;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_data(ip->i_gl, bh);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
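
/**
 * do_sync - Write a batch of local quota changes back to the quota file
 * @num_qd: The number of quota data entries in @qda
 * @qda: Array of quota data entries, sorted by ID before locking
 *
 * Takes the quota glocks exclusively in sorted order, reserves space for
 * the worst case (every entry needing new blocks, plus unstuffing and a
 * possible page boundary straddle), applies each change to the quota
 * file, and then reverses it in the per-node change file via do_qc().
 */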
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
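
/**
 * do_glock - Acquire a quota data glock, refreshing the LVB if required
 * @qd: The quota data
 * @force_refresh: If set, reread the quota entry from disk even if the
 *                 cached LVB looks valid
 * @q_gh: The holder for the acquired glock
 *
 * Normally takes the glock shared and trusts the cached LVB.  If the LVB
 * is uninitialized (or a refresh is forced), the lock is retaken
 * exclusively so the LVB can be rebuilt from the quota file, after which
 * the shared acquisition is retried.
 */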
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
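
/*
 * need_sync() implements the "quota_scale" heuristic described at the top
 * of this file: the closer an ID's usage gets to its limit, the sooner a
 * locally cached change should be pushed to the quota file.  A change is
 * synced once the local change, scaled by the journal count and the
 * quota_scale ratio and added to the globally known value, would meet or
 * exceed the limit.
 */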
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (qd->qd_id.type == USRQUOTA) ? "user" : "group",
	       from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}
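
/**
 * gfs2_quota_check - Check whether the inode's owners are over quota
 * @ip: The inode whose quotas were locked by gfs2_quota_lock()
 * @uid: The user ID to check
 * @gid: The group ID to check
 *
 * Returns -EDQUOT and sends a netlink hard-limit warning if a hard limit
 * is exceeded; soft limit overruns are only reported, rate limited by
 * the quota_warn_period tunable.
 */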
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}
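
/**
 * gfs2_quota_sync - Sync all cached quota changes to the quota file
 * @sb: The VFS superblock
 * @type: Unused; both user and group quotas are always synced
 *
 * Bumps the sync generation, then repeatedly fishes out batches of dirty
 * quota data entries and writes them back via do_sync(), stamping each
 * successfully written entry with the new generation.
 */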
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
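
/**
 * gfs2_quota_init - Rebuild the in-core quota state at mount time
 * @sdp: The GFS2 superblock
 *
 * Walks every slot in the per-node quota change file, and for each slot
 * holding a nonzero change (e.g. left over from a crash) allocates a qd,
 * marks its slot in the bitmap and hashes it so the pending change will
 * be synced later.  This is why no separate quota check program needs to
 * be run after a node crash.
 */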
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kmalloc(bm_size, GFP_NOFS|__GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS, PAGE_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	memset(sdp->sd_quota_bitmap, 0, bm_size);

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
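
/**
 * gfs2_quota_cleanup - Tear down all quota data objects at unmount time
 * @sdp: The GFS2 superblock
 *
 * Empties sd_quota_list, unhashing each entry and removing it from the
 * shrinker LRU before freeing it via RCU, and then releases the quota
 * slot bitmap.
 */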
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		if (is_vmalloc_addr(sdp->sd_quota_bitmap))
			vfree(sdp->sd_quota_bitmap);
		else
			kfree(sdp->sd_quota_bitmap);
		sdp->sd_quota_bitmap = NULL;
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK	(FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_xstate     = gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}