/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_dinode.h"
#include "xfs_filestream.h"

/*
 * File system operations
 */

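/*
 * Fill in the geometry structure for the XFS_IOC_FSGEOMETRY family of ioctls
 * from the in-core superblock.  The new_version argument is the geometry
 * structure version the caller understands; newer fields are only filled in
 * for callers that request version 2, 3 or 4.
 */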
int
xfs_fs_geometry(
        xfs_mount_t             *mp,
        xfs_fsop_geom_t         *geo,
        int                     new_version)
{
        memset(geo, 0, sizeof(*geo));

        geo->blocksize = mp->m_sb.sb_blocksize;
        geo->rtextsize = mp->m_sb.sb_rextsize;
        geo->agblocks = mp->m_sb.sb_agblocks;
        geo->agcount = mp->m_sb.sb_agcount;
        geo->logblocks = mp->m_sb.sb_logblocks;
        geo->sectsize = mp->m_sb.sb_sectsize;
        geo->inodesize = mp->m_sb.sb_inodesize;
        geo->imaxpct = mp->m_sb.sb_imax_pct;
        geo->datablocks = mp->m_sb.sb_dblocks;
        geo->rtblocks = mp->m_sb.sb_rblocks;
        geo->rtextents = mp->m_sb.sb_rextents;
        geo->logstart = mp->m_sb.sb_logstart;
        ASSERT(sizeof(geo->uuid) == sizeof(mp->m_sb.sb_uuid));
        memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
        if (new_version >= 2) {
                geo->sunit = mp->m_sb.sb_unit;
                geo->swidth = mp->m_sb.sb_width;
        }
        if (new_version >= 3) {
                geo->version = XFS_FSOP_GEOM_VERSION;
                geo->flags = XFS_FSOP_GEOM_FLAGS_NLINK |
                             XFS_FSOP_GEOM_FLAGS_DIRV2 |
                        (xfs_sb_version_hasattr(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
                        (xfs_sb_version_hasquota(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
                        (xfs_sb_version_hasalign(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
                        (xfs_sb_version_hasdalign(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
                        (xfs_sb_version_hasextflgbit(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
                        (xfs_sb_version_hassector(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
                        (xfs_sb_version_hasasciici(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
                        (xfs_sb_version_haslazysbcount(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
                        (xfs_sb_version_hasattr2(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_ATTR2 : 0) |
                        (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
                        (xfs_sb_version_hascrc(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
                        (xfs_sb_version_hasftype(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_FTYPE : 0) |
                        (xfs_sb_version_hasfinobt(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_FINOBT : 0);
                geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
                                mp->m_sb.sb_logsectsize : BBSIZE;
                geo->rtsectsize = mp->m_sb.sb_blocksize;
                geo->dirblocksize = mp->m_dir_geo->blksize;
        }
        if (new_version >= 4) {
                geo->flags |=
                        (xfs_sb_version_haslogv2(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
                geo->logsunit = mp->m_sb.sb_logsunit;
        }
        return 0;
}

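/*
 * Get an uncached, zeroed buffer for a new header block, with its disk
 * address and verifier ops already set so it can be written straight out.
 */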
static struct xfs_buf *
xfs_growfs_get_hdr_buf(
        struct xfs_mount        *mp,
        xfs_daddr_t             blkno,
        size_t                  numblks,
        int                     flags,
        const struct xfs_buf_ops *ops)
{
        struct xfs_buf          *bp;

        bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
        if (!bp)
                return NULL;

        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
        bp->b_bn = blkno;
        bp->b_maps[0].bm_bn = blkno;
        bp->b_ops = ops;

        return bp;
}

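/*
 * Grow the data section of the filesystem.  The headers and btree root
 * blocks of any new AGs are written directly (non-transactionally) first;
 * the growfs transaction then updates the superblock counters and frees any
 * space added to the old last AG, and finally the secondary superblocks are
 * rewritten.
 */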
static int
xfs_growfs_data_private(
        xfs_mount_t             *mp,            /* mount point for filesystem */
        xfs_growfs_data_t       *in)            /* growfs data input struct */
{
        xfs_agf_t               *agf;
        struct xfs_agfl         *agfl;
        xfs_agi_t               *agi;
        xfs_agnumber_t          agno;
        xfs_extlen_t            agsize;
        xfs_extlen_t            tmpsize;
        xfs_alloc_rec_t         *arec;
        xfs_buf_t               *bp;
        int                     bucket;
        int                     dpct;
        int                     error, saved_error = 0;
        xfs_agnumber_t          nagcount;
        xfs_agnumber_t          nagimax = 0;
        xfs_rfsblock_t          nb, nb_mod;
        xfs_rfsblock_t          new;
        xfs_rfsblock_t          nfree;
        xfs_agnumber_t          oagcount;
        int                     pct;
        xfs_trans_t             *tp;

        nb = in->newblocks;
        pct = in->imaxpct;
        if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
                return XFS_ERROR(EINVAL);
        if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
                return error;
        dpct = pct - mp->m_sb.sb_imax_pct;
        bp = xfs_buf_read_uncached(mp->m_ddev_targp,
                                XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
                                XFS_FSS_TO_BB(mp, 1), 0, NULL);
        if (!bp)
                return EIO;
        if (bp->b_error) {
                error = bp->b_error;
                xfs_buf_relse(bp);
                return error;
        }
        xfs_buf_relse(bp);

        new = nb;       /* use new as a temporary here */
        nb_mod = do_div(new, mp->m_sb.sb_agblocks);
        nagcount = new + (nb_mod != 0);
        if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
                nagcount--;
                nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
                if (nb < mp->m_sb.sb_dblocks)
                        return XFS_ERROR(EINVAL);
        }
        new = nb - mp->m_sb.sb_dblocks;
        oagcount = mp->m_sb.sb_agcount;

        /* allocate the new per-ag structures */
        if (nagcount > oagcount) {
                error = xfs_initialize_perag(mp, nagcount, &nagimax);
                if (error)
                        return error;
        }

        tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
        tp->t_flags |= XFS_TRANS_RESERVE;
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
                                  XFS_GROWFS_SPACE_RES(mp), 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        /*
         * Write new AG headers to disk. Non-transactional, but written
         * synchronously so they are completed prior to the growfs transaction
         * being logged.
         */
        nfree = 0;
        for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
                __be32          *agfl_bno;

                /*
                 * AG freespace header block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
                                XFS_FSS_TO_BB(mp, 1), 0,
                                &xfs_agf_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                agf = XFS_BUF_TO_AGF(bp);
                agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
                agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
                agf->agf_seqno = cpu_to_be32(agno);
                if (agno == nagcount - 1)
                        agsize = nb -
                                (agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
                else
                        agsize = mp->m_sb.sb_agblocks;
                agf->agf_length = cpu_to_be32(agsize);
                agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
                agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
                agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
                agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
                agf->agf_flfirst = 0;
                agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
                agf->agf_flcount = 0;
                tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
                agf->agf_freeblks = cpu_to_be32(tmpsize);
                agf->agf_longest = cpu_to_be32(tmpsize);
                if (xfs_sb_version_hascrc(&mp->m_sb))
                        uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_uuid);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * AG freelist header block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
                                XFS_FSS_TO_BB(mp, 1), 0,
                                &xfs_agfl_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                agfl = XFS_BUF_TO_AGFL(bp);
                if (xfs_sb_version_hascrc(&mp->m_sb)) {
                        agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
                        agfl->agfl_seqno = cpu_to_be32(agno);
                        uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
                }

                agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
                for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
                        agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * AG inode header block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
                                XFS_FSS_TO_BB(mp, 1), 0,
                                &xfs_agi_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                agi = XFS_BUF_TO_AGI(bp);
                agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
                agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
                agi->agi_seqno = cpu_to_be32(agno);
                agi->agi_length = cpu_to_be32(agsize);
                agi->agi_count = 0;
                agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
                agi->agi_level = cpu_to_be32(1);
                agi->agi_freecount = 0;
                agi->agi_newino = cpu_to_be32(NULLAGINO);
                agi->agi_dirino = cpu_to_be32(NULLAGINO);
                if (xfs_sb_version_hascrc(&mp->m_sb))
                        uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_uuid);
                if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
                        agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
                        agi->agi_free_level = cpu_to_be32(1);
                }
                for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
                        agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * BNO btree root block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
                                BTOBB(mp->m_sb.sb_blocksize), 0,
                                &xfs_allocbt_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                if (xfs_sb_version_hascrc(&mp->m_sb))
                        xfs_btree_init_block(mp, bp, XFS_ABTB_CRC_MAGIC, 0, 1,
                                                agno, XFS_BTREE_CRC_BLOCKS);
                else
                        xfs_btree_init_block(mp, bp, XFS_ABTB_MAGIC, 0, 1,
                                                agno, 0);

                arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
                arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
                arec->ar_blockcount = cpu_to_be32(
                        agsize - be32_to_cpu(arec->ar_startblock));

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * CNT btree root block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
                                BTOBB(mp->m_sb.sb_blocksize), 0,
                                &xfs_allocbt_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                if (xfs_sb_version_hascrc(&mp->m_sb))
                        xfs_btree_init_block(mp, bp, XFS_ABTC_CRC_MAGIC, 0, 1,
                                                agno, XFS_BTREE_CRC_BLOCKS);
                else
                        xfs_btree_init_block(mp, bp, XFS_ABTC_MAGIC, 0, 1,
                                                agno, 0);

                arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
                arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
                arec->ar_blockcount = cpu_to_be32(
                        agsize - be32_to_cpu(arec->ar_startblock));
                nfree += be32_to_cpu(arec->ar_blockcount);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * INO btree root block
                 */
                bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
                                BTOBB(mp->m_sb.sb_blocksize), 0,
                                &xfs_inobt_buf_ops);
                if (!bp) {
                        error = ENOMEM;
                        goto error0;
                }

                if (xfs_sb_version_hascrc(&mp->m_sb))
                        xfs_btree_init_block(mp, bp, XFS_IBT_CRC_MAGIC, 0, 0,
                                                agno, XFS_BTREE_CRC_BLOCKS);
                else
                        xfs_btree_init_block(mp, bp, XFS_IBT_MAGIC, 0, 0,
                                                agno, 0);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error)
                        goto error0;

                /*
                 * FINO btree root block
                 */
                if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
                        bp = xfs_growfs_get_hdr_buf(mp,
                                XFS_AGB_TO_DADDR(mp, agno, XFS_FIBT_BLOCK(mp)),
                                BTOBB(mp->m_sb.sb_blocksize), 0,
                                &xfs_inobt_buf_ops);
                        if (!bp) {
                                error = ENOMEM;
                                goto error0;
                        }

                        if (xfs_sb_version_hascrc(&mp->m_sb))
                                xfs_btree_init_block(mp, bp, XFS_FIBT_CRC_MAGIC,
                                                     0, 0, agno,
                                                     XFS_BTREE_CRC_BLOCKS);
                        else
                                xfs_btree_init_block(mp, bp, XFS_FIBT_MAGIC, 0,
                                                     0, agno, 0);

                        error = xfs_bwrite(bp);
                        xfs_buf_relse(bp);
                        if (error)
                                goto error0;
                }
        }
        xfs_trans_agblocks_delta(tp, nfree);
        /*
         * There are new blocks in the old last a.g.
         */
        if (new) {
                /*
                 * Change the agi length.
                 */
                error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
                if (error) {
                        goto error0;
                }
                ASSERT(bp);
                agi = XFS_BUF_TO_AGI(bp);
                be32_add_cpu(&agi->agi_length, new);
                ASSERT(nagcount == oagcount ||
                       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
                xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
                /*
                 * Change agf length.
                 */
                error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
                if (error) {
                        goto error0;
                }
                ASSERT(bp);
                agf = XFS_BUF_TO_AGF(bp);
                be32_add_cpu(&agf->agf_length, new);
                ASSERT(be32_to_cpu(agf->agf_length) ==
                       be32_to_cpu(agi->agi_length));

                xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
                /*
                 * Free the new space.
                 */
                error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
                        be32_to_cpu(agf->agf_length) - new), new);
                if (error) {
                        goto error0;
                }
        }

        /*
         * Update changed superblock fields transactionally. These are not
         * seen by the rest of the world until the transaction commit applies
         * them atomically to the superblock.
         */
        if (nagcount > oagcount)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
        if (nb > mp->m_sb.sb_dblocks)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
                                 nb - mp->m_sb.sb_dblocks);
        if (nfree)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
        if (dpct)
                xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
        error = xfs_trans_commit(tp, 0);
        if (error)
                return error;

        /* New allocation groups fully initialized, so update mount struct */
        if (nagimax)
                mp->m_maxagi = nagimax;

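        /*
         * Recompute the maximum inode count: sb_imax_pct percent of the new
         * data block count, converted from blocks to inodes by shifting by
         * sb_inopblog (log2 of inodes per block).
         */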
        if (mp->m_sb.sb_imax_pct) {
                __uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
                do_div(icount, 100);
                mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
        } else
                mp->m_maxicount = 0;
        xfs_set_low_space_thresholds(mp);

        /* update secondary superblocks. */
        for (agno = 1; agno < nagcount; agno++) {
                error = 0;
                /*
                 * new secondary superblocks need to be zeroed, not read from
                 * disk as the contents of the new area we are growing into is
                 * completely unknown.
                 */
                if (agno < oagcount) {
                        error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                                  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
                                  XFS_FSS_TO_BB(mp, 1), 0, &bp,
                                  &xfs_sb_buf_ops);
                } else {
                        bp = xfs_trans_get_buf(NULL, mp->m_ddev_targp,
                                  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
                                  XFS_FSS_TO_BB(mp, 1), 0);
                        if (bp) {
                                bp->b_ops = &xfs_sb_buf_ops;
                                xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
                        } else
                                error = ENOMEM;
                }

                /*
                 * If we get an error reading or writing alternate superblocks,
                 * continue.  xfs_repair chooses the "best" superblock based
                 * on most matches; if we break early, we'll leave more
                 * superblocks un-updated than updated, and xfs_repair may
                 * pick them over the properly-updated primary.
                 */
                if (error) {
                        xfs_warn(mp,
                "error %d reading secondary superblock for ag %d",
                                error, agno);
                        saved_error = error;
                        continue;
                }
                xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);

                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error) {
                        xfs_warn(mp,
                "write error %d updating secondary superblock for ag %d",
                                error, agno);
                        saved_error = error;
                        continue;
                }
        }
        return saved_error ? saved_error : error;

 error0:
        xfs_trans_cancel(tp, XFS_TRANS_ABORT);
        return error;
}

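/*
 * Grow (or change) the log section of the filesystem.  Only the request is
 * validated here; resizing or moving an existing log is not implemented, so
 * this currently always fails with ENOSYS.
 */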
static int
xfs_growfs_log_private(
        xfs_mount_t             *mp,    /* mount point for filesystem */
        xfs_growfs_log_t        *in)    /* growfs log input struct */
{
        xfs_extlen_t            nb;

        nb = in->newblocks;
        if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
                return XFS_ERROR(EINVAL);
        if (nb == mp->m_sb.sb_logblocks &&
            in->isint == (mp->m_sb.sb_logstart != 0))
                return XFS_ERROR(EINVAL);
        /*
         * Moving the log is hard, need new interfaces to sync
         * the log first, hold off all activity while moving it.
         * Can have shorter or longer log in the same space,
         * or transform internal to external log or vice versa.
         */
        return XFS_ERROR(ENOSYS);
}

/*
 * Protected versions of the growfs functions: they acquire and release locks
 * on the mount point.  Exported through the ioctls XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */
int
xfs_growfs_data(
        xfs_mount_t             *mp,
        xfs_growfs_data_t       *in)
{
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return XFS_ERROR(EPERM);
        if (!mutex_trylock(&mp->m_growlock))
                return XFS_ERROR(EWOULDBLOCK);
        error = xfs_growfs_data_private(mp, in);
        mutex_unlock(&mp->m_growlock);
        return error;
}

int
xfs_growfs_log(
        xfs_mount_t             *mp,
        xfs_growfs_log_t        *in)
{
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return XFS_ERROR(EPERM);
        if (!mutex_trylock(&mp->m_growlock))
                return XFS_ERROR(EWOULDBLOCK);
        error = xfs_growfs_log_private(mp, in);
        mutex_unlock(&mp->m_growlock);
        return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */
int
xfs_fs_counts(
        xfs_mount_t             *mp,
        xfs_fsop_counts_t       *cnt)
{
        xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
        spin_lock(&mp->m_sb_lock);
        cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
        cnt->freertx = mp->m_sb.sb_frextents;
        cnt->freeino = mp->m_sb.sb_ifree;
        cnt->allocino = mp->m_sb.sb_icount;
        spin_unlock(&mp->m_sb_lock);
        return 0;
}

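/*
 * Userspace sketch (not part of this file): the counters above are typically
 * retrieved through the XFS_IOC_FSCOUNTS ioctl named in the comment,
 * assuming the xfs_fsop_counts_t layout filled in by xfs_fs_counts():
 *
 *      xfs_fsop_counts_t cnt;
 *      if (ioctl(fd, XFS_IOC_FSCOUNTS, &cnt) == 0)
 *              printf("free blocks %llu, free inodes %llu\n",
 *                     (unsigned long long)cnt.freedata,
 *                     (unsigned long long)cnt.freeino);
 */
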
/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table.  The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available.  Otherwise return
 * as many as possible to satisfy the request.  The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current number of reserved
 * blocks available should be returned; no settings are changed.
 */
int
xfs_reserve_blocks(
        xfs_mount_t             *mp,
        __uint64_t              *inval,
        xfs_fsop_resblks_t      *outval)
{
        __int64_t               lcounter, delta, fdblks_delta;
        __uint64_t              request;

        /* If inval is null, report current values and return */
        if (inval == (__uint64_t *)NULL) {
                if (!outval)
                        return EINVAL;
                outval->resblks = mp->m_resblks;
                outval->resblks_avail = mp->m_resblks_avail;
                return 0;
        }

        request = *inval;

        /*
         * With per-cpu counters, this becomes an interesting
         * problem. We need to work out if we are freeing or allocating
         * blocks first, then we can do the modification as necessary.
         *
         * We do this under the m_sb_lock so that if we are near
         * ENOSPC, we will hold out any changes while we work out
         * what to do. This means that the amount of free space can
         * change while we do this, so we need to retry if we end up
         * trying to reserve more space than is available.
         *
         * We also use the xfs_mod_incore_sb() interface so that we
         * don't have to care about whether per cpu counters are
         * enabled, disabled or even compiled in....
         */
retry:
        spin_lock(&mp->m_sb_lock);
        xfs_icsb_sync_counters_locked(mp, 0);

        /*
         * If our previous reservation was larger than the current value,
         * then move any unused blocks back to the free pool.
         */
        fdblks_delta = 0;
        if (mp->m_resblks > request) {
                lcounter = mp->m_resblks_avail - request;
                if (lcounter > 0) {             /* release unused blocks */
                        fdblks_delta = lcounter;
                        mp->m_resblks_avail -= lcounter;
                }
                mp->m_resblks = request;
        } else {
                __int64_t       free;

                free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
                if (!free)
                        goto out;       /* ENOSPC and fdblks_delta = 0 */

                delta = request - mp->m_resblks;
                lcounter = free - delta;
                if (lcounter < 0) {
                        /* We can't satisfy the request, just get what we can */
                        mp->m_resblks += free;
                        mp->m_resblks_avail += free;
                        fdblks_delta = -free;
                } else {
                        fdblks_delta = -delta;
                        mp->m_resblks = request;
                        mp->m_resblks_avail += delta;
                }
        }
out:
        if (outval) {
                outval->resblks = mp->m_resblks;
                outval->resblks_avail = mp->m_resblks_avail;
        }
        spin_unlock(&mp->m_sb_lock);

        if (fdblks_delta) {
                /*
                 * If we are putting blocks back here, m_resblks_avail is
                 * already at its max so this will put it in the free pool.
                 *
                 * If we need space, we'll either succeed in getting it
                 * from the free block count or we'll get an ENOSPC. If
                 * we get an ENOSPC, it means things changed while we were
                 * calculating fdblks_delta and so we should try again to
                 * see if there is anything left to reserve.
                 *
                 * Don't set the reserved flag here - we don't want to reserve
                 * the extra reserve blocks from the reserve.....
                 */
                int error;

                error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
                                                 fdblks_delta, 0);
                if (error == ENOSPC)
                        goto retry;
        }
        return 0;
}

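/*
 * Userspace sketch (not part of this file): reserving blocks through the
 * XFS_IOC_SET_RESBLKS ioctl named above, assuming the xfs_fsop_resblks_t
 * layout used for outval (resblks, resblks_avail):
 *
 *      xfs_fsop_resblks_t res = { .resblks = 8192 };
 *      if (ioctl(fd, XFS_IOC_SET_RESBLKS, &res) == 0)
 *              printf("reserved %llu blocks, %llu unused\n",
 *                     (unsigned long long)res.resblks,
 *                     (unsigned long long)res.resblks_avail);
 */
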
/*
 * Dump a transaction into the log that contains no real change. This is
 * needed to be able to make the log dirty or stamp the current tail LSN into
 * the log during the covering operation.
 *
 * We cannot use an inode here for this - that will push dirty state back up
 * into the VFS and then periodic inode flushing will prevent log covering from
 * making progress. Hence we log a field in the superblock instead and use a
 * synchronous transaction to ensure the superblock is immediately unpinned
 * and can be written back.
 */
int
xfs_fs_log_dummy(
        xfs_mount_t     *mp)
{
        xfs_trans_t     *tp;
        int             error;

        tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        /* log the UUID because it is an unchanging field */
        xfs_mod_sb(tp, XFS_SB_UUID);
        xfs_trans_set_sync(tp);
        return xfs_trans_commit(tp, 0);
}

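/*
 * Take the filesystem down in response to a "goingdown" request.  Depending
 * on the XFS_FSOP_GOING_FLAGS_* value passed in, freeze the block device
 * before shutting down, shut down after flushing the log, or shut down
 * without flushing the log at all.
 */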
int
xfs_fs_goingdown(
        xfs_mount_t     *mp,
        __uint32_t      inflags)
{
        switch (inflags) {
        case XFS_FSOP_GOING_FLAGS_DEFAULT: {
                struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

                if (sb && !IS_ERR(sb)) {
                        xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
                        thaw_bdev(sb->s_bdev, sb);
                }

                break;
        }
        case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
                xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
                break;
        case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
                xfs_force_shutdown(mp,
                                SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
                break;
        default:
                return XFS_ERROR(EINVAL);
        }

        return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 */
void
xfs_do_force_shutdown(
        xfs_mount_t     *mp,
        int             flags,
        char            *fname,
        int             lnnum)
{
        int             logerror;

        logerror = flags & SHUTDOWN_LOG_IO_ERROR;

        if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
                xfs_notice(mp,
        "%s(0x%x) called from line %d of file %s. Return address = 0x%p",
                        __func__, flags, lnnum, fname, __return_address);
        }
        /*
         * No need to duplicate efforts.
         */
        if (XFS_FORCED_SHUTDOWN(mp) && !logerror)
                return;

        /*
         * This flags XFS_MOUNT_FS_SHUTDOWN, makes sure that we don't
         * queue up anybody new on the log reservations, and wakes up
         * everybody who's sleeping on log reservations to tell them
         * the bad news.
         */
        if (xfs_log_force_umount(mp, logerror))
                return;

        if (flags & SHUTDOWN_CORRUPT_INCORE) {
                xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT,
    "Corruption of in-memory data detected. Shutting down filesystem");
                if (XFS_ERRLEVEL_HIGH <= xfs_error_level)
                        xfs_stack_trace();
        } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
                if (logerror) {
                        xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
                "Log I/O Error Detected. Shutting down filesystem");
                } else if (flags & SHUTDOWN_DEVICE_REQ) {
                        xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
                "All device paths lost. Shutting down filesystem");
                } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
                        xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
                "I/O Error Detected. Shutting down filesystem");
                }
        }
        if (!(flags & SHUTDOWN_FORCE_UMOUNT)) {
                xfs_alert(mp,
        "Please umount the filesystem and rectify the problem(s)");
        }
}