/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale" controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes with infinite bandwidth) to twice the
 * user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
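
/*
 * Illustration of the bound above: with the default quota_scale of one and
 * a limit of 100MB, usage recorded across the whole cluster can in theory
 * reach 200MB before every node has synced and started enforcing the
 * limit.  Raising quota_scale above one tightens this bound at the cost of
 * more frequent quota-file syncs.
 */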

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT      12
#define GFS2_QD_HASH_SIZE       (1 << GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}

static enum lru_status gfs2_qd_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_lru_isolate_move(lru, &qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
				     gfs2_qd_isolate, &dispose);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
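
/*
 * Quota-file layout: user and group records for the same numeric ID are
 * interleaved, so index = 2 * id for a user quota and 2 * id + 1 for a
 * group quota, and each record occupies sizeof(struct gfs2_quota) bytes.
 */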
static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}
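
/*
 * Look up a gfs2_quota_data in one hash bucket under RCU.  A match must
 * belong to the same superblock and qid; on success a reference is taken
 * (unless the lockref is already dead) and the entry is pulled off the LRU.
 */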
static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}

static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
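
/*
 * Each gfs2_quota_data with a pending change owns one slot in the per-node
 * quota-change file.  The slots in use are tracked by sd_quota_bitmap under
 * sd_bitmap_lock; slot_get() finds and claims a free slot the first time it
 * is called, and later calls just bump qd_slot_count.
 */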
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}
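
/*
 * Map a qd's slot to its block in the quota-change file and read that
 * block in, caching the buffer head and a pointer to the on-disk change
 * record:
 *
 *	block  = qd_slot / sd_qc_per_block;
 *	offset = qd_slot % sd_qc_per_block;
 *
 * The buffer is held until the matching bh_put() drops qd_bh_count to zero.
 */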
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
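
/*
 * An inode holds at most four quota_data references: the inode's current
 * uid and gid, plus, on an ownership change, the new uid and/or gid passed
 * in here.  They are stored in ip->i_res->rs_qa_qd[] in that order.
 */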
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}
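
/*
 * Record a local usage change against a qd in the per-node quota-change
 * file.  The first nonzero change for an ID initializes the on-disk record
 * and takes qd and slot references (QDF_CHANGE set); when the accumulated
 * change returns to zero the record is cleared and the references dropped.
 */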
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct qc_dqblk *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & QC_SPC_SOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & QC_SPC_HARD) {
			q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & QC_SPACE) {
			q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = &q;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_data(ip->i_gl, bh);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	set_bit(QDF_REFRESH, &qd->qd_flags);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
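
/*
 * Sync a batch of qd's to the quota file: take each quota glock exclusively
 * (in sorted order, so nodes cannot deadlock against each other), reserve
 * enough blocks for the worst case, then fold each qd's pending change into
 * the quota file and subtract it back out of the per-node change file via
 * do_qc().
 */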
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;
out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl, NORMAL_FLUSH);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
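
/*
 * Acquire a shared glock on the qd and copy the limits cached in the lock
 * value block (LVB).  If the LVB has never been filled in (bad magic) or a
 * refresh is forced, re-take the glock exclusively and reload the LVB from
 * the quota file via update_qd() before restarting in shared mode.
 */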
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
		force_refresh = FORCE;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];
		error = do_glock(qd, NO_FORCE, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
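
/*
 * Decide whether a qd's local change is worth syncing now.  The local
 * change is scaled up by the number of journals (nodes) and by quota_scale
 * (num/den); if that projection, added to the globally known value, would
 * reach the limit, a sync is requested.  Negative changes (frees) and IDs
 * already over their limit never trigger a sync here.
 */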
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	fs_info(sdp, "quota %s for %s %u\n",
		type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

/**
 * gfs2_quota_check - check if allocating new blocks will exceed quota
 * @ip:  The inode for which this check is being performed
 * @uid: The uid to check against
 * @gid: The gid to check against
 * @ap:  The allocation parameters. ap->target contains the requested
 *       blocks. ap->min_target, if set, contains the minimum blks
 *       requested.
 *
 * Returns: 0 on success.
 *                  min_req = ap->min_target ? ap->min_target : ap->target;
 *                  quota must allow at least min_req blks for success and
 *                  ap->allowed is set to the number of blocks allowed
 *
 *          -EDQUOT otherwise, quota violation. ap->allowed is set to number
 *                  of blocks available.
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
		     struct gfs2_alloc_parms *ap)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value, warn, limit;
	unsigned int x;
	int error = 0;

	ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (limit > 0 && (limit - value) < ap->allowed)
			ap->allowed = limit - value;
		/* If we can't meet the target */
		if (limit && limit < (value + (s64)ap->target)) {
			/* If no min_target specified or we don't meet
			 * min_target, return -EDQUOT */
			if (!ap->min_target || ap->min_target > ap->allowed) {
				print_message(qd, "exceeded");
				quota_send_warning(qd->qd_id,
						   sdp->sd_vfs->s_dev,
						   QUOTA_NL_BHARDWARN);
				error = -EDQUOT;
				break;
			}
		} else if (warn && warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp, gt_quota_warn_period)
					 * HZ)) {
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}
	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
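
/*
 * At mount time, walk the entire per-node quota-change file and rebuild
 * the in-core state for every slot with a nonzero pending change, so that
 * changes which had not yet been synced before the last unmount (or crash)
 * are not lost.
 */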
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO, PAGE_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	kvfree(sdp->sd_quota_bitmap);
	sdp->sd_quota_bitmap = NULL;
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(state, 0, sizeof(*state));

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
		state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED |
						  QCI_SYSFILE;
		break;
	case GFS2_QUOTA_OFF:
		break;
	}
	if (sdp->sd_quota_inode) {
		state->s_state[USRQUOTA].ino =
					GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		state->s_state[USRQUOTA].blocks = sdp->sd_quota_inode->i_blocks;
	}
	state->s_state[USRQUOTA].nextents = 1;	/* unsupported */
	state->s_state[GRPQUOTA] = state->s_state[USRQUOTA];
	state->s_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(*fdq));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
	fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK	(QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)

static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct qc_dqblk *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
	    ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= QC_SPC_SOFT;

	if ((fdq->d_fieldmask & QC_SPC_HARD) &&
	    ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= QC_SPC_HARD;

	if ((fdq->d_fieldmask & QC_SPACE) &&
	    ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= QC_SPACE;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync	= gfs2_quota_sync,
	.get_state	= gfs2_quota_get_state,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for (i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}