/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale" controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */
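
/*
 * For example (see need_sync() below): with the default quota_scale of one,
 * a locally accumulated change triggers an early sync once
 *
 *	cached_usage + change * number_of_journals * scale_num / scale_den
 *
 * reaches the hard limit. Scaling the local change by the number of journals
 * (i.e. potential writer nodes) is what bounds the worst-case cluster-wide
 * overrun described above.
 */
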
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT	12
#define GFS2_QD_HASH_SIZE	(1 << GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK	(GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}
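
/*
 * Shrinker callback: try to isolate an unused quota structure for disposal.
 * Entries whose lockref.lock is contended are simply skipped; only entries
 * with a zero reference count are marked dead and moved to the dispose list.
 */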
static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_move(&qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
				   &dispose, &sc->nr_to_scan);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
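
/*
 * The quota file interleaves user and group records: the record for user ID
 * n lives at index 2n and the record for group ID n at index 2n + 1, each
 * record occupying sizeof(struct gfs2_quota) bytes. So, for example, UID 0
 * sits at offset 0 with GID 0 immediately after it.
 */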
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}
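
/*
 * Look up (or create) the quota data for a given ID. The fast path is a
 * lockless RCU search of the hash bucket; on a miss, a new structure is
 * allocated and the bucket re-searched under the locks in case another task
 * won the race, in which case the fresh allocation is discarded.
 */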
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
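
/*
 * Each active quota data owns one slot in this node's quota change file,
 * allocated from sd_quota_bitmap. slot_get() finds a free slot on first use;
 * slot_hold() and slot_put() just adjust the slot's reference count, with
 * the bitmap bit cleared again when the count drops to zero.
 */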
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}
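
/*
 * Map a quota slot to its buffer in the quota change file. Each file system
 * block holds sd_qc_per_block change records, so a slot lives at
 * block = slot / sd_qc_per_block, record offset = slot % sd_qc_per_block.
 */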
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}
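
/*
 * Find the next quota data on this node that has pending changes and needs
 * to be written back to the quota file, taking the references required to
 * keep it pinned while the sync is in progress.
 */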
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
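
/*
 * Apply a usage delta to this node's quota change record for the given ID.
 * When the accumulated change first becomes non-zero the qd takes a slot
 * reference and an extra object reference; when it returns to zero, both
 * are released again.
 */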
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = &q;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_data(ip->i_gl, bh);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	set_bit(QDF_REFRESH, &qd->qd_flags);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
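
/*
 * Write a batch of accumulated quota changes into the shared quota file.
 * All affected quota glocks plus the quota inode glock are acquired in
 * sorted order, one transaction covers every record, and each local change
 * record is zeroed (via do_qc()) once its value has been folded in.
 */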
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl, NORMAL_FLUSH);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
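
/*
 * Acquire the quota glock in the shared state and make sure its Lock Value
 * Block holds valid limits. If the LVB is uninitialized (bad magic) or a
 * refresh is forced, the glock is retaken exclusively, the on-disk record is
 * read into the LVB via update_qd(), and the shared request is retried.
 */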
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
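
/*
 * Decide whether this node's pending change is large enough to warrant an
 * early sync. The local change is scaled by the number of journals and by
 * quota_scale (num/den); a sync is requested once
 *
 *	cached_value + change * num_journals * scale_num / scale_den
 *
 * reaches the hard limit. Negative changes, and IDs already at or over the
 * limit, never trigger a sync from here.
 */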
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
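
/*
 * Mount-time initialization: size the slot bitmap from the quota change
 * file, then scan every change record in it, recreating an in-core
 * gfs2_quota_data (with QDF_CHANGE set) for each slot holding a non-zero
 * change left over from before the file system was last unmounted.
 */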
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO, PAGE_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		if (is_vmalloc_addr(sdp->sd_quota_bitmap))
			vfree(sdp->sd_quota_bitmap);
		else
			kfree(sdp->sd_quota_bitmap);
		sdp->sd_quota_bitmap = NULL;
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else {
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);
		}

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}
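
/*
 * Report a single ID's limits and usage. On-disk values are kept in file
 * system blocks; sd_fsb2bb_shift converts them to the 512-byte basic blocks
 * that the XFS-style quota interface expects.
 */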
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK	(FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_xstate	= gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}