// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"

static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

void
xfs_uuid_table_free(void)
{
        if (xfs_uuid_table_size == 0)
                return;
        kmem_free(xfs_uuid_table);
        xfs_uuid_table = NULL;
        xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
        struct xfs_mount        *mp)
{
        uuid_t                  *uuid = &mp->m_sb.sb_uuid;
        int                     hole, i;

        /* Publish UUID in struct super_block */
        uuid_copy(&mp->m_super->s_uuid, uuid);

        if (mp->m_flags & XFS_MOUNT_NOUUID)
                return 0;

        if (uuid_is_null(uuid)) {
                xfs_warn(mp, "Filesystem has null UUID - can't mount");
                return -EINVAL;
        }

        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
                if (uuid_is_null(&xfs_uuid_table[i])) {
                        hole = i;
                        continue;
                }
                if (uuid_equal(uuid, &xfs_uuid_table[i]))
                        goto out_duplicate;
        }

        if (hole < 0) {
                xfs_uuid_table = kmem_realloc(xfs_uuid_table,
                        (xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
                        KM_SLEEP);
                hole = xfs_uuid_table_size++;
        }
        xfs_uuid_table[hole] = *uuid;
        mutex_unlock(&xfs_uuid_table_mutex);

        return 0;

out_duplicate:
        mutex_unlock(&xfs_uuid_table_mutex);
        xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
        return -EINVAL;
}

STATIC void
xfs_uuid_unmount(
        struct xfs_mount        *mp)
{
        uuid_t                  *uuid = &mp->m_sb.sb_uuid;
        int                     i;

        if (mp->m_flags & XFS_MOUNT_NOUUID)
                return;

        mutex_lock(&xfs_uuid_table_mutex);
        for (i = 0; i < xfs_uuid_table_size; i++) {
                if (uuid_is_null(&xfs_uuid_table[i]))
                        continue;
                if (!uuid_equal(uuid, &xfs_uuid_table[i]))
                        continue;
                memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
                break;
        }
        ASSERT(i < xfs_uuid_table_size);
        mutex_unlock(&xfs_uuid_table_mutex);
}

STATIC void
__xfs_free_perag(
        struct rcu_head *head)
{
        struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

        ASSERT(atomic_read(&pag->pag_ref) == 0);
        kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
        xfs_mount_t     *mp)
{
        xfs_agnumber_t  agno;
        struct xfs_perag *pag;

        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                spin_lock(&mp->m_perag_lock);
                pag = radix_tree_delete(&mp->m_perag_tree, agno);
                spin_unlock(&mp->m_perag_lock);
                ASSERT(pag);
                ASSERT(atomic_read(&pag->pag_ref) == 0);
                xfs_buf_hash_destroy(pag);
                mutex_destroy(&pag->pag_ici_reclaim_lock);
                call_rcu(&pag->rcu_head, __xfs_free_perag);
        }
}
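
/*
 * Note: the perag structures are freed via call_rcu() because lookup
 * paths such as xfs_perag_get() find them in m_perag_tree under
 * rcu_read_lock() without taking m_perag_lock; deferring the free to a
 * grace period ensures a concurrent lookup never touches freed memory
 * after the radix tree deletion above.
 */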

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
        xfs_sb_t        *sbp,
        uint64_t        nblocks)
{
        ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
        ASSERT(sbp->sb_blocklog >= BBSHIFT);

        /* Limited by ULONG_MAX of page cache index */
        if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
                return -EFBIG;
        return 0;
}
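
/*
 * Worked example of the check above: with 4k pages (PAGE_SHIFT == 12)
 * and 1k filesystem blocks (sb_blocklog == 10), each page spans 4
 * blocks, so the page cache can address at most ULONG_MAX pages worth
 * of data, i.e. nblocks >> 2 must fit in ULONG_MAX.  On a 32-bit
 * kernel that caps the device at roughly 16TB; anything larger fails
 * here with -EFBIG.
 */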

int
xfs_initialize_perag(
        xfs_mount_t     *mp,
        xfs_agnumber_t  agcount,
        xfs_agnumber_t  *maxagi)
{
        xfs_agnumber_t  index;
        xfs_agnumber_t  first_initialised = NULLAGNUMBER;
        xfs_perag_t     *pag;
        int             error = -ENOMEM;

        /*
         * Walk the current per-ag tree so we don't try to initialise AGs
         * that already exist (growfs case). Allocate and insert all the
         * AGs we don't find ready for initialisation.
         */
        for (index = 0; index < agcount; index++) {
                pag = xfs_perag_get(mp, index);
                if (pag) {
                        xfs_perag_put(pag);
                        continue;
                }

                pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
                if (!pag)
                        goto out_unwind_new_pags;
                pag->pag_agno = index;
                pag->pag_mount = mp;
                spin_lock_init(&pag->pag_ici_lock);
                mutex_init(&pag->pag_ici_reclaim_lock);
                INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
                if (xfs_buf_hash_init(pag))
                        goto out_free_pag;
                init_waitqueue_head(&pag->pagb_wait);

                if (radix_tree_preload(GFP_NOFS))
                        goto out_hash_destroy;

                spin_lock(&mp->m_perag_lock);
                if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
                        BUG();
                        spin_unlock(&mp->m_perag_lock);
                        radix_tree_preload_end();
                        error = -EEXIST;
                        goto out_hash_destroy;
                }
                spin_unlock(&mp->m_perag_lock);
                radix_tree_preload_end();
                /* first new pag is fully initialized */
                if (first_initialised == NULLAGNUMBER)
                        first_initialised = index;
        }

        index = xfs_set_inode_alloc(mp, agcount);

        if (maxagi)
                *maxagi = index;

        mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
        return 0;

out_hash_destroy:
        xfs_buf_hash_destroy(pag);
out_free_pag:
        mutex_destroy(&pag->pag_ici_reclaim_lock);
        kmem_free(pag);
out_unwind_new_pags:
        /* unwind any prior newly initialized pags */
        for (index = first_initialised; index < agcount; index++) {
                pag = radix_tree_delete(&mp->m_perag_tree, index);
                if (!pag)
                        break;
                xfs_buf_hash_destroy(pag);
                mutex_destroy(&pag->pag_ici_reclaim_lock);
                kmem_free(pag);
        }
        return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
        struct xfs_mount *mp,
        int             flags)
{
        unsigned int    sector_size;
        struct xfs_buf  *bp;
        struct xfs_sb   *sbp = &mp->m_sb;
        int             error;
        int             loud = !(flags & XFS_MFSI_QUIET);
        const struct xfs_buf_ops *buf_ops;

        ASSERT(mp->m_sb_bp == NULL);
        ASSERT(mp->m_ddev_targp != NULL);

        /*
         * For the initial read, we must guess at the sector
         * size based on the block device.  It's enough to
         * get the sb_sectsize out of the superblock and
         * then reread with the proper length.
         * We don't verify it yet, because it may not be complete.
         */
        sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
        buf_ops = NULL;

        /*
         * Allocate a (locked) buffer to hold the superblock. This will be kept
         * around at all times to optimize access to the superblock. Therefore,
         * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
         * elevated.
         */
reread:
        error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
                                      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
                                      buf_ops);
        if (error) {
                if (loud)
                        xfs_warn(mp, "SB validate failed with error %d.", error);
                /* bad CRC means corrupted metadata */
                if (error == -EFSBADCRC)
                        error = -EFSCORRUPTED;
                return error;
        }

        /*
         * Initialize the mount structure from the superblock.
         */
        xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));

        /*
         * If we haven't validated the superblock, do so now before we try
         * to check the sector size and reread the superblock appropriately.
         */
        if (sbp->sb_magicnum != XFS_SB_MAGIC) {
                if (loud)
                        xfs_warn(mp, "Invalid superblock magic number");
                error = -EINVAL;
                goto release_buf;
        }

        /*
         * We must be able to do sector-sized and sector-aligned IO.
         */
        if (sector_size > sbp->sb_sectsize) {
                if (loud)
                        xfs_warn(mp, "device supports %u byte sectors (not %u)",
                                sector_size, sbp->sb_sectsize);
                error = -ENOSYS;
                goto release_buf;
        }

        if (buf_ops == NULL) {
                /*
                 * Re-read the superblock so the buffer is correctly sized,
                 * and properly verified.
                 */
                xfs_buf_relse(bp);
                sector_size = sbp->sb_sectsize;
                buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
                goto reread;
        }

        xfs_reinit_percpu_counters(mp);

        /* no need to be quiet anymore, so reset the buf ops */
        bp->b_ops = &xfs_sb_buf_ops;

        mp->m_sb_bp = bp;
        xfs_buf_unlock(bp);
        return 0;

release_buf:
        xfs_buf_relse(bp);
        return error;
}

/*
 * Update alignment values based on mount options and sb values
 */
STATIC int
xfs_update_alignment(xfs_mount_t *mp)
{
        xfs_sb_t        *sbp = &(mp->m_sb);

        if (mp->m_dalign) {
                /*
                 * If stripe unit and stripe width are not multiples
                 * of the fs blocksize turn off alignment.
                 */
                if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
                    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
                        xfs_warn(mp,
                "alignment check failed: sunit/swidth vs. blocksize(%d)",
                                sbp->sb_blocksize);
                        return -EINVAL;
                } else {
                        /*
                         * Convert the stripe unit and width to FSBs.
                         */
                        mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
                        if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
                                xfs_warn(mp,
                "alignment check failed: sunit/swidth vs. agsize(%d)",
                                        sbp->sb_agblocks);
                                return -EINVAL;
                        } else if (mp->m_dalign) {
                                mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
                        } else {
                                xfs_warn(mp,
                "alignment check failed: sunit(%d) less than bsize(%d)",
                                        mp->m_dalign, sbp->sb_blocksize);
                                return -EINVAL;
                        }
                }

                /*
                 * Update superblock with new values
                 * and log changes
                 */
                if (xfs_sb_version_hasdalign(sbp)) {
                        if (sbp->sb_unit != mp->m_dalign) {
                                sbp->sb_unit = mp->m_dalign;
                                mp->m_update_sb = true;
                        }
                        if (sbp->sb_width != mp->m_swidth) {
                                sbp->sb_width = mp->m_swidth;
                                mp->m_update_sb = true;
                        }
                } else {
                        xfs_warn(mp,
        "cannot change alignment: superblock does not support data alignment");
                        return -EINVAL;
                }
        } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
                    xfs_sb_version_hasdalign(&mp->m_sb)) {
                mp->m_dalign = sbp->sb_unit;
                mp->m_swidth = sbp->sb_width;
        }

        return 0;
}

/*
 * Set the maximum inode count for this filesystem
 */
STATIC void
xfs_set_maxicount(xfs_mount_t *mp)
{
        xfs_sb_t        *sbp = &(mp->m_sb);
        uint64_t        icount;

        if (sbp->sb_imax_pct) {
                /*
                 * Make sure the maximum inode count is a multiple
                 * of the units we allocate inodes in.
                 */
                icount = sbp->sb_dblocks * sbp->sb_imax_pct;
                do_div(icount, 100);
                do_div(icount, mp->m_ialloc_blks);
                mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
                                   sbp->sb_inopblog;
        } else {
                mp->m_maxicount = 0;
        }
}
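
/*
 * Illustrative numbers for the calculation above: with
 * sb_dblocks = 1000000, sb_imax_pct = 25 and m_ialloc_blks = 4,
 * icount is 250000 blocks, rounded down to a multiple of the 4-block
 * inode allocation unit, and m_maxicount becomes that block count
 * shifted by sb_inopblog, i.e. it is expressed in inodes, not blocks.
 */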

/*
 * Set the default minimum read and write sizes unless
 * already specified in a mount option.
 * We use smaller I/O sizes when the file system
 * is being used for NFS service (wsync mount option).
 */
STATIC void
xfs_set_rw_sizes(xfs_mount_t *mp)
{
        xfs_sb_t        *sbp = &(mp->m_sb);
        int             readio_log, writeio_log;

        if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
                if (mp->m_flags & XFS_MOUNT_WSYNC) {
                        readio_log = XFS_WSYNC_READIO_LOG;
                        writeio_log = XFS_WSYNC_WRITEIO_LOG;
                } else {
                        readio_log = XFS_READIO_LOG_LARGE;
                        writeio_log = XFS_WRITEIO_LOG_LARGE;
                }
        } else {
                readio_log = mp->m_readio_log;
                writeio_log = mp->m_writeio_log;
        }

        if (sbp->sb_blocklog > readio_log) {
                mp->m_readio_log = sbp->sb_blocklog;
        } else {
                mp->m_readio_log = readio_log;
        }
        mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
        if (sbp->sb_blocklog > writeio_log) {
                mp->m_writeio_log = sbp->sb_blocklog;
        } else {
                mp->m_writeio_log = writeio_log;
        }
        mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);
}
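
/*
 * Sketch of the effect (assuming the common 64k large-I/O default,
 * i.e. a log2 value of 16): with a 4k block size (sb_blocklog == 12),
 * m_readio_blocks = 1 << (16 - 12) = 16 filesystem blocks.  The
 * comparison against sb_blocklog above only ever rounds the I/O size
 * up to at least one filesystem block; it never shrinks it.
 */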

/*
 * Precalculate the low space thresholds for dynamic speculative
 * preallocation.
 */
void
xfs_set_low_space_thresholds(
        struct xfs_mount        *mp)
{
        int i;

        for (i = 0; i < XFS_LOWSP_MAX; i++) {
                uint64_t space = mp->m_sb.sb_dblocks;

                do_div(space, 100);
                mp->m_low_space[i] = space * (i + 1);
        }
}
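
/*
 * The thresholds computed above are simply 1%, 2%, ... of sb_dblocks,
 * one per XFS_LOWSP_* level; e.g. on a 100-million-block filesystem
 * m_low_space[0] is 1 million blocks and each following entry adds
 * another 1%.
 */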

/*
 * Set whether we're using inode alignment.
 */
STATIC void
xfs_set_inoalignment(xfs_mount_t *mp)
{
        if (xfs_sb_version_hasalign(&mp->m_sb) &&
            mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
                mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
        else
                mp->m_inoalign_mask = 0;
        /*
         * If we are using stripe alignment, check whether
         * the stripe unit is a multiple of the inode alignment
         */
        if (mp->m_dalign && mp->m_inoalign_mask &&
            !(mp->m_dalign & mp->m_inoalign_mask))
                mp->m_sinoalign = mp->m_dalign;
        else
                mp->m_sinoalign = 0;
}
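
/*
 * Example: with sb_inoalignmt == 4 the mask is 3, so m_sinoalign is
 * only set when the stripe unit (in fsbs, after xfs_update_alignment())
 * is a multiple of 4, i.e. (m_dalign & 3) == 0.  Inode chunks can then
 * be aligned to stripe boundaries without violating inode alignment.
 */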

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
        struct xfs_mount *mp)
{
        struct xfs_buf  *bp;
        xfs_daddr_t     d;
        int             error;

        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
                xfs_warn(mp, "filesystem size mismatch detected");
                return -EFBIG;
        }
        error = xfs_buf_read_uncached(mp->m_ddev_targp,
                                      d - XFS_FSS_TO_BB(mp, 1),
                                      XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
        if (error) {
                xfs_warn(mp, "last sector read failed");
                return error;
        }
        xfs_buf_relse(bp);

        if (mp->m_logdev_targp == mp->m_ddev_targp)
                return 0;

        d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
        if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
                xfs_warn(mp, "log size mismatch detected");
                return -EFBIG;
        }
        error = xfs_buf_read_uncached(mp->m_logdev_targp,
                                      d - XFS_FSB_TO_BB(mp, 1),
                                      XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
        if (error) {
                xfs_warn(mp, "log device read failed");
                return error;
        }
        xfs_buf_relse(bp);
        return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
        struct xfs_mount        *mp)
{
        mp->m_qflags = 0;

        /* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
        if (mp->m_sb.sb_qflags == 0)
                return 0;
        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_qflags = 0;
        spin_unlock(&mp->m_sb_lock);

        if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
                return 0;

        return xfs_sync_sb(mp, false);
}

uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
        uint64_t resblks;

        /*
         * We default to 5% or 8192 fsbs of space reserved, whichever is
         * smaller.  This is intended to cover concurrent allocation
         * transactions when we initially hit enospc.  These each require a 4
         * block reservation.  Hence by default we cover roughly 2000 concurrent
         * allocation reservations.
         */
        resblks = mp->m_sb.sb_dblocks;
        do_div(resblks, 20);
        resblks = min_t(uint64_t, resblks, 8192);
        return resblks;
}
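
/*
 * Example: a 10-million-block filesystem reserves
 * min(10000000 / 20, 8192) = 8192 blocks, while a 100000-block
 * filesystem reserves 100000 / 20 = 5000 blocks, i.e. the full 5%.
 */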

/*
 * This function does the following on an initial mount of a file system:
 *      - reads the superblock from disk and init the mount struct
 *      - if we're a 32-bit kernel, do a size check on the superblock
 *        so we don't mount terabyte filesystems
 *      - init mount struct realtime fields
 *      - allocate inode hash table for fs
 *      - init directory manager
 *      - perform recovery and init the log manager
 */
int
xfs_mountfs(
        struct xfs_mount        *mp)
{
        struct xfs_sb           *sbp = &(mp->m_sb);
        struct xfs_inode        *rip;
        uint64_t                resblks;
        uint                    quotamount = 0;
        uint                    quotaflags = 0;
        int                     error = 0;

        xfs_sb_mount_common(mp, sbp);

        /*
         * Check for mismatched features2 values.  Older kernels read & wrote
         * into the wrong sb offset for sb_features2 on some platforms due to
         * xfs_sb_t not being 64bit size aligned when sb_features2 was added,
         * which made older superblock reading/writing routines swap it as a
         * 64-bit value.
         *
         * For backwards compatibility, we make both slots equal.
         *
         * If we detect a mismatched field, we OR the set bits into the existing
         * features2 field in case it has already been modified; we don't want
         * to lose any features.  We then update the bad location with the ORed
         * value so that older kernels will see any features2 flags.  The
         * superblock writeback code ensures the new sb_features2 is copied to
         * sb_bad_features2 before it is logged or written to disk.
         */
        if (xfs_sb_has_mismatched_features2(sbp)) {
                xfs_warn(mp, "correcting sb_features alignment problem");
                sbp->sb_features2 |= sbp->sb_bad_features2;
                mp->m_update_sb = true;

                /*
                 * Re-check for ATTR2 in case it was found in bad_features2
                 * slot.
                 */
                if (xfs_sb_version_hasattr2(&mp->m_sb) &&
                    !(mp->m_flags & XFS_MOUNT_NOATTR2))
                        mp->m_flags |= XFS_MOUNT_ATTR2;
        }

        if (xfs_sb_version_hasattr2(&mp->m_sb) &&
            (mp->m_flags & XFS_MOUNT_NOATTR2)) {
                xfs_sb_version_removeattr2(&mp->m_sb);
                mp->m_update_sb = true;

                /* update sb_versionnum for the clearing of the morebits */
                if (!sbp->sb_features2)
                        mp->m_update_sb = true;
        }

        /* always use v2 inodes by default now */
        if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
                mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
                mp->m_update_sb = true;
        }

        /*
         * Check if sb_agblocks is aligned at stripe boundary.  If sb_agblocks
         * is NOT aligned, turn off m_dalign: allocator alignment is within an
         * ag, so the ag has to be aligned at a stripe boundary.
         */
        error = xfs_update_alignment(mp);
        if (error)
                goto out;

        xfs_alloc_compute_maxlevels(mp);
        xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
        xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
        xfs_ialloc_compute_maxlevels(mp);
        xfs_rmapbt_compute_maxlevels(mp);
        xfs_refcountbt_compute_maxlevels(mp);

        xfs_set_maxicount(mp);

        /* enable fail_at_unmount as default */
        mp->m_fail_unmount = true;

        error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
        if (error)
                goto out;

        error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
                               &mp->m_kobj, "stats");
        if (error)
                goto out_remove_sysfs;

        error = xfs_error_sysfs_init(mp);
        if (error)
                goto out_del_stats;

        error = xfs_errortag_init(mp);
        if (error)
                goto out_remove_error_sysfs;

        error = xfs_uuid_mount(mp);
        if (error)
                goto out_remove_errortag;

        /*
         * Set the minimum read and write sizes
         */
        xfs_set_rw_sizes(mp);

        /* set the low space thresholds for dynamic preallocation */
        xfs_set_low_space_thresholds(mp);

        /*
         * Set the inode cluster size.
         * This may still be overridden by the file system
         * block size if it is larger than the chosen cluster size.
         *
         * For v5 filesystems, scale the cluster size with the inode size to
         * keep a constant ratio of inode per cluster buffer, but only if mkfs
         * has set the inode alignment value appropriately for larger cluster
         * sizes.
         */
        mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                int     new_size = mp->m_inode_cluster_size;

                new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
                if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
                        mp->m_inode_cluster_size = new_size;
        }

        /*
         * If enabled, sparse inode chunk alignment is expected to match the
         * cluster size. Full inode chunk alignment must match the chunk size,
         * but that is checked on sb read verification...
         */
        if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
            mp->m_sb.sb_spino_align !=
                        XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) {
                xfs_warn(mp,
        "Sparse inode block alignment (%u) must match cluster size (%llu).",
                         mp->m_sb.sb_spino_align,
                         XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size));
                error = -EINVAL;
                goto out_remove_uuid;
        }

        /*
         * Set inode alignment fields
         */
        xfs_set_inoalignment(mp);

        /*
         * Check that the data (and log if separate) is an ok size.
         */
        error = xfs_check_sizes(mp);
        if (error)
                goto out_remove_uuid;

        /*
         * Initialize realtime fields in the mount structure
         */
        error = xfs_rtmount_init(mp);
        if (error) {
                xfs_warn(mp, "RT mount failed");
                goto out_remove_uuid;
        }

        /*
         * Copies the low order bits of the timestamp and the randomly
         * set "sequence" number out of a UUID.
         */
        mp->m_fixedfsid[0] =
                (get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
                 get_unaligned_be16(&sbp->sb_uuid.b[4]);
        mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);

        error = xfs_da_mount(mp);
        if (error) {
                xfs_warn(mp, "Failed dir/attr init: %d", error);
                goto out_remove_uuid;
        }

        /*
         * Initialize the precomputed transaction reservations values.
         */
        xfs_trans_init(mp);

        /*
         * Allocate and initialize the per-ag data.
         */
        error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
        if (error) {
                xfs_warn(mp, "Failed per-ag init: %d", error);
                goto out_free_dir;
        }

        if (!sbp->sb_logblocks) {
                xfs_warn(mp, "no log defined");
                XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp);
                error = -EFSCORRUPTED;
                goto out_free_perag;
        }

        /*
         * Log's mount-time initialization. The first part of recovery can place
         * some items on the AIL, to be handled when recovery is finished or
         * cancelled.
         */
        error = xfs_log_mount(mp, mp->m_logdev_targp,
                              XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
                              XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
        if (error) {
                xfs_warn(mp, "log mount failed");
                goto out_fail_wait;
        }

        /*
         * Now the log is mounted, we know if it was an unclean shutdown or
         * not.  If it was, then with the first phase of recovery complete we
         * have consistent AG blocks on disk.  We have not recovered EFIs yet,
         * but they are recovered transactionally in the second recovery phase
         * later.
         *
         * Hence we can safely re-initialise incore superblock counters from
         * the per-ag data.  These may not be correct if the filesystem was not
         * cleanly unmounted, so we need to wait for recovery to finish before
         * doing this.
         *
         * If the filesystem was cleanly unmounted, then we can trust the
         * values in the superblock to be correct and we don't need to do
         * anything here.
         *
         * If we are currently making the filesystem, the initialisation will
         * fail as the perag data is in an undefined state.
         */
        if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
            !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
            !mp->m_sb.sb_inprogress) {
                error = xfs_initialize_perag_data(mp, sbp->sb_agcount);
                if (error)
                        goto out_log_dealloc;
        }

        /*
         * Get and sanity-check the root inode.
         * Save the pointer to it in the mount structure.
         */
        error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
                         XFS_ILOCK_EXCL, &rip);
        if (error) {
                xfs_warn(mp,
                        "Failed to read root inode 0x%llx, error %d",
                        sbp->sb_rootino, -error);
                goto out_log_dealloc;
        }

        ASSERT(rip != NULL);

        if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) {
                xfs_warn(mp, "corrupted root inode %llu: not a directory",
                        (unsigned long long)rip->i_ino);
                xfs_iunlock(rip, XFS_ILOCK_EXCL);
                XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
                                 mp);
                error = -EFSCORRUPTED;
                goto out_rele_rip;
        }
        mp->m_rootip = rip;     /* save it */

        xfs_iunlock(rip, XFS_ILOCK_EXCL);

        /*
         * Initialize realtime inode pointers in the mount structure
         */
        error = xfs_rtmount_inodes(mp);
        if (error) {
                /*
                 * Free up the root inode.
                 */
                xfs_warn(mp, "failed to read RT inodes");
                goto out_rele_rip;
        }

        /*
         * If this is a read-only mount defer the superblock updates until
         * the next remount into writeable mode.  Otherwise we would never
         * perform the update e.g. for the root filesystem.
         */
        if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
                error = xfs_sync_sb(mp, false);
                if (error) {
                        xfs_warn(mp, "failed to write sb changes");
                        goto out_rtunmount;
                }
        }

        /*
         * Initialise the XFS quota management subsystem for this mount
         */
        if (XFS_IS_QUOTA_RUNNING(mp)) {
                error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
                if (error)
                        goto out_rtunmount;
        } else {
                ASSERT(!XFS_IS_QUOTA_ON(mp));

                /*
                 * If a file system had quotas running earlier, but decided to
                 * mount without -o uquota/pquota/gquota options, revoke the
                 * quotachecked license.
                 */
                if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
                        xfs_notice(mp, "resetting quota flags");
                        error = xfs_mount_reset_sbqflags(mp);
                        if (error)
                                goto out_rtunmount;
                }
        }

        /*
         * Finish recovering the file system.  This part needed to be delayed
         * until after the root and real-time bitmap inodes were consistently
         * read in.
         */
        error = xfs_log_mount_finish(mp);
        if (error) {
                xfs_warn(mp, "log mount finish failed");
                goto out_rtunmount;
        }

        /*
         * Now the log is fully replayed, we can transition to full read-only
         * mode for read-only mounts.  This will sync all the metadata and clean
         * the log so that the recovery we just performed does not have to be
         * replayed again on the next mount.
         *
         * We use the same quiesce mechanism as the rw->ro remount, as they are
         * semantically identical operations.
         */
        if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
                                                        XFS_MOUNT_RDONLY) {
                xfs_quiesce_attr(mp);
        }

        /*
         * Complete the quota initialisation, post-log-replay component.
         */
        if (quotamount) {
                ASSERT(mp->m_qflags == 0);
                mp->m_qflags = quotaflags;

                xfs_qm_mount_quotas(mp);
        }

        /*
         * Now we are mounted, reserve a small amount of unused space for
         * privileged transactions. This is needed so that transaction
         * space required for critical operations can dip into this pool
         * when at ENOSPC. This is needed for operations like create with
         * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
         * are not allowed to use this reserved space.
         *
         * This may drive us straight to ENOSPC on mount, but that implies
         * we were already there on the last unmount. Warn if this occurs.
         */
        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
                resblks = xfs_default_resblks(mp);
                error = xfs_reserve_blocks(mp, &resblks, NULL);
                if (error)
                        xfs_warn(mp,
        "Unable to allocate reserve blocks. Continuing without reserve pool.");

                /* Recover any CoW blocks that never got remapped. */
                error = xfs_reflink_recover_cow(mp);
                if (error) {
                        xfs_err(mp,
        "Error %d recovering leftover CoW allocations.", error);
                        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                        goto out_quota;
                }

                /* Reserve AG blocks for future btree expansion. */
                error = xfs_fs_reserve_ag_blocks(mp);
                if (error && error != -ENOSPC)
                        goto out_agresv;
        }

        return 0;

out_agresv:
        xfs_fs_unreserve_ag_blocks(mp);
out_quota:
        xfs_qm_unmount_quotas(mp);
out_rtunmount:
        xfs_rtunmount_inodes(mp);
out_rele_rip:
        IRELE(rip);
        /* Clean out dquots that might be in memory after quotacheck. */
        xfs_qm_unmount(mp);
        /*
         * Cancel all delayed reclaim work and reclaim the inodes directly.
         * We have to do this /after/ rtunmount and qm_unmount because those
         * two will have scheduled delayed reclaim for the rt/quota inodes.
         *
         * This is slightly different from the unmountfs call sequence
         * because we could be tearing down a partially set up mount.  In
         * particular, if log_mount_finish fails we bail out without calling
         * qm_unmount_quotas and therefore rely on qm_unmount to release the
         * quota inodes.
         */
        cancel_delayed_work_sync(&mp->m_reclaim_work);
        xfs_reclaim_inodes(mp, SYNC_WAIT);
out_log_dealloc:
        mp->m_flags |= XFS_MOUNT_UNMOUNTING;
        xfs_log_mount_cancel(mp);
out_fail_wait:
        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
                xfs_wait_buftarg(mp->m_logdev_targp);
        xfs_wait_buftarg(mp->m_ddev_targp);
out_free_perag:
        xfs_free_perag(mp);
out_free_dir:
        xfs_da_unmount(mp);
out_remove_uuid:
        xfs_uuid_unmount(mp);
out_remove_errortag:
        xfs_errortag_del(mp);
out_remove_error_sysfs:
        xfs_error_sysfs_del(mp);
out_del_stats:
        xfs_sysfs_del(&mp->m_stats.xs_kobj);
out_remove_sysfs:
        xfs_sysfs_del(&mp->m_kobj);
out:
        return error;
}

/*
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
void
xfs_unmountfs(
        struct xfs_mount        *mp)
{
        uint64_t                resblks;
        int                     error;

        xfs_icache_disable_reclaim(mp);
        xfs_fs_unreserve_ag_blocks(mp);
        xfs_qm_unmount_quotas(mp);
        xfs_rtunmount_inodes(mp);
        IRELE(mp->m_rootip);

        /*
         * We can potentially deadlock here if we have an inode cluster
         * that has been freed but whose buffer is still pinned in memory
         * because the transaction is still sitting in an iclog.  The stale
         * inodes on that buffer will have their flush locks held until the
         * transaction hits the disk and the callbacks run.  The inode flush
         * takes the flush lock unconditionally, and with nothing to push
         * out the iclog we would never get that unlocked.  Hence we need
         * to force the log first.
         */
        xfs_log_force(mp, XFS_LOG_SYNC);

        /*
         * Wait for all busy extents to be freed, including completion of
         * any discard operation.
         */
        xfs_extent_busy_wait_all(mp);
        flush_workqueue(xfs_discard_wq);

        /*
         * We now need to tell the world we are unmounting. This will allow
         * us to detect that the filesystem is going away and we should error
         * out anything that we have been retrying in the background. This will
         * prevent never-ending retries in AIL pushing from hanging the unmount.
         */
        mp->m_flags |= XFS_MOUNT_UNMOUNTING;

        /*
         * Flush all pending changes from the AIL.
         */
        xfs_ail_push_all_sync(mp->m_ail);

        /*
         * And reclaim all inodes.  At this point there should be no dirty
         * inodes and none should be pinned or locked, but use synchronous
         * reclaim just to be sure.  We can stop background inode reclaim
         * here as well if it is still running.
         */
        cancel_delayed_work_sync(&mp->m_reclaim_work);
        xfs_reclaim_inodes(mp, SYNC_WAIT);

        xfs_qm_unmount(mp);

        /*
         * Unreserve any blocks we have so that when we unmount we don't account
         * the reserved free space as used. This is really only necessary for
         * lazy superblock counting because it trusts the incore superblock
         * counters to be absolutely correct on clean unmount.
         *
         * We don't bother correcting this elsewhere for lazy superblock
         * counting because on mount of an unclean filesystem we reconstruct the
         * correct counter value and this is irrelevant.
         *
         * For non-lazy counter filesystems, this doesn't matter at all because
         * we only ever apply deltas to the superblock and hence the incore
         * value does not matter....
         */
        resblks = 0;
        error = xfs_reserve_blocks(mp, &resblks, NULL);
        if (error)
                xfs_warn(mp, "Unable to free reserved block pool. "
                                "Freespace may not be correct on next mount.");

        error = xfs_log_sbcount(mp);
        if (error)
                xfs_warn(mp, "Unable to update superblock counters. "
                                "Freespace may not be correct on next mount.");

        xfs_log_unmount(mp);
        xfs_da_unmount(mp);
        xfs_uuid_unmount(mp);

#if defined(DEBUG)
        xfs_errortag_clearall(mp);
#endif
        xfs_free_perag(mp);

        xfs_errortag_del(mp);
        xfs_error_sysfs_del(mp);
        xfs_sysfs_del(&mp->m_stats.xs_kobj);
        xfs_sysfs_del(&mp->m_kobj);
}

/*
 * Determine whether modifications can proceed. The caller specifies the minimum
 * freeze level for which modifications should not be allowed. This allows
 * certain operations to proceed while the freeze sequence is in progress, if
 * necessary.
 */
bool
xfs_fs_writable(
        struct xfs_mount        *mp,
        int                     level)
{
        ASSERT(level > SB_UNFROZEN);
        if ((mp->m_super->s_writers.frozen >= level) ||
            XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
                return false;

        return true;
}
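
/*
 * For example, xfs_log_sbcount() below passes SB_FREEZE_COMPLETE so
 * the counter sync is still permitted late in the freeze sequence,
 * while xfs_mount_reset_sbqflags() above passes SB_FREEZE_WRITE and
 * is therefore refused as soon as writes are frozen.
 */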

/*
 * xfs_log_sbcount
 *
 * Sync the superblock counters to disk.
 *
 * Note this code can be called during the process of freezing, so we use the
 * transaction allocator that does not block when the transaction subsystem is
 * in its frozen state.
 */
int
xfs_log_sbcount(xfs_mount_t *mp)
{
        /* allow this to proceed during the freeze sequence... */
        if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
                return 0;

        /*
         * We don't need to do this if we are updating the superblock
         * counters on every modification.
         */
        if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
                return 0;

        return xfs_sync_sb(mp, true);
}

/*
 * Deltas for the inode count are +/-64, hence we use a large batch size
 * of 128 so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH        128
int
xfs_mod_icount(
        struct xfs_mount        *mp,
        int64_t                 delta)
{
        percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
        if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
                ASSERT(0);
                percpu_counter_add(&mp->m_icount, -delta);
                return -EINVAL;
        }
        return 0;
}
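
/*
 * With a batch of 128, per-CPU deltas may drift up to the batch size
 * from the global m_icount before being folded back, which is why the
 * underflow check above uses the batch-aware __percpu_counter_compare()
 * rather than reading the (possibly stale) global count directly.
 */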

int
xfs_mod_ifree(
        struct xfs_mount        *mp,
        int64_t                 delta)
{
        percpu_counter_add(&mp->m_ifree, delta);
        if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
                ASSERT(0);
                percpu_counter_add(&mp->m_ifree, -delta);
                return -EINVAL;
        }
        return 0;
}

/*
 * Deltas for the block count can vary from 1 to very large, but lock contention
 * only occurs on frequent small block count updates such as in the delayed
 * allocation path for buffered writes (page a time updates). Hence we set
 * a large batch count (1024) to minimise global counter updates except when
 * we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH      1024
int
xfs_mod_fdblocks(
        struct xfs_mount        *mp,
        int64_t                 delta,
        bool                    rsvd)
{
        int64_t                 lcounter;
        long long               res_used;
        s32                     batch;

        if (delta > 0) {
                /*
                 * If the reserve pool is depleted, put blocks back into it
                 * first. Most of the time the pool is full.
                 */
                if (likely(mp->m_resblks == mp->m_resblks_avail)) {
                        percpu_counter_add(&mp->m_fdblocks, delta);
                        return 0;
                }

                spin_lock(&mp->m_sb_lock);
                res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

                if (res_used > delta) {
                        mp->m_resblks_avail += delta;
                } else {
                        delta -= res_used;
                        mp->m_resblks_avail = mp->m_resblks;
                        percpu_counter_add(&mp->m_fdblocks, delta);
                }
                spin_unlock(&mp->m_sb_lock);
                return 0;
        }

        /*
         * Taking blocks away, need to be more accurate the closer we
         * are to zero.
         *
         * If the counter has a value of less than 2 * max batch size,
         * then make everything serialise as we are real close to
         * ENOSPC.
         */
        if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
                                     XFS_FDBLOCKS_BATCH) < 0)
                batch = 1;
        else
                batch = XFS_FDBLOCKS_BATCH;

        percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
        if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
                                     XFS_FDBLOCKS_BATCH) >= 0) {
                /* we had space! */
                return 0;
        }

        /*
         * lock up the sb for dipping into reserves before releasing the space
         * that took us to ENOSPC.
         */
        spin_lock(&mp->m_sb_lock);
        percpu_counter_add(&mp->m_fdblocks, -delta);
        if (!rsvd)
                goto fdblocks_enospc;

        lcounter = (long long)mp->m_resblks_avail + delta;
        if (lcounter >= 0) {
                mp->m_resblks_avail = lcounter;
                spin_unlock(&mp->m_sb_lock);
                return 0;
        }
        printk_once(KERN_WARNING
                "Filesystem \"%s\": reserve blocks depleted! "
                "Consider increasing reserve pool size.",
                mp->m_fsname);

fdblocks_enospc:
        spin_unlock(&mp->m_sb_lock);
        return -ENOSPC;
}
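
/*
 * Note the asymmetry above: frees (delta > 0) refill the reserve pool
 * before touching the free space counter, while allocations (delta < 0)
 * dip into the pool only as a last resort and only for rsvd callers.
 * An unprivileged allocation that would drag m_fdblocks below
 * m_alloc_set_aside is backed out and fails with -ENOSPC instead.
 */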

int
xfs_mod_frextents(
        struct xfs_mount        *mp,
        int64_t                 delta)
{
        int64_t                 lcounter;
        int                     ret = 0;

        spin_lock(&mp->m_sb_lock);
        lcounter = mp->m_sb.sb_frextents + delta;
        if (lcounter < 0)
                ret = -ENOSPC;
        else
                mp->m_sb.sb_frextents = lcounter;
        spin_unlock(&mp->m_sb_lock);
        return ret;
}

/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_buf_relse().
 *
 * If the flags parameter is XBF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
struct xfs_buf *
xfs_getsb(
        struct xfs_mount        *mp,
        int                     flags)
{
        struct xfs_buf          *bp = mp->m_sb_bp;

        if (!xfs_buf_trylock(bp)) {
                if (flags & XBF_TRYLOCK)
                        return NULL;
                xfs_buf_lock(bp);
        }

        xfs_buf_hold(bp);
        ASSERT(bp->b_flags & XBF_DONE);
        return bp;
}
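
/*
 * Typical usage (sketch): a short-lived reference pairs this with
 * xfs_buf_relse() to drop both the lock and the hold taken above:
 *
 *      bp = xfs_getsb(mp, 0);
 *      ...examine or log the superblock...
 *      xfs_buf_relse(bp);
 */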

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
        struct xfs_mount        *mp)
{
        struct xfs_buf          *bp = mp->m_sb_bp;

        xfs_buf_lock(bp);
        mp->m_sb_bp = NULL;
        xfs_buf_relse(bp);
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        struct xfs_mount        *mp,
        char                    *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                xfs_notice(mp, "%s required on read-only device.", message);
                xfs_notice(mp, "write access unavailable, cannot proceed.");
                return -EROFS;
        }
        return 0;
}