sufile.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228
  1. /*
  2. * sufile.c - NILFS segment usage file.
  3. *
  4. * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * Written by Koji Sato.
  17. * Revised by Ryusuke Konishi.
  18. */
  19. #include <linux/kernel.h>
  20. #include <linux/fs.h>
  21. #include <linux/string.h>
  22. #include <linux/buffer_head.h>
  23. #include <linux/errno.h>
  24. #include <linux/nilfs2_fs.h>
  25. #include "mdt.h"
  26. #include "sufile.h"
  27. #include <trace/events/nilfs2.h>
/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 *
 * @mi must stay the first member: NILFS_SUI() casts the mdt info
 * pointer returned by NILFS_MDT() directly to this type.
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;	/* number of clean segments */
	__u64 allocmin;			/* lower limit of allocatable segment range */
	__u64 allocmax;			/* upper limit of allocatable segment range */
};
/*
 * NILFS_SUI - get the sufile-private info of @sufile.
 * The cast is valid because struct nilfs_sufile_info embeds
 * struct nilfs_mdt_info as its first member.
 */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
/* Number of segment usage entries that fit in one sufile block. */
static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}
  50. static unsigned long
  51. nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
  52. {
  53. __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
  54. do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
  55. return (unsigned long)t;
  56. }
  57. static unsigned long
  58. nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
  59. {
  60. __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
  61. return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
  62. }
  63. static unsigned long
  64. nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
  65. __u64 max)
  66. {
  67. return min_t(unsigned long,
  68. nilfs_sufile_segment_usages_per_block(sufile) -
  69. nilfs_sufile_get_offset(sufile, curr),
  70. max - curr + 1);
  71. }
  72. static struct nilfs_segment_usage *
  73. nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
  74. struct buffer_head *bh, void *kaddr)
  75. {
  76. return kaddr + bh_offset(bh) +
  77. nilfs_sufile_get_offset(sufile, segnum) *
  78. NILFS_MDT(sufile)->mi_entry_size;
  79. }
/*
 * Read the sufile header block (block 0).  create == 0: the header is
 * expected to exist already and is never created here.
 */
static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}
/*
 * Read (or, if @create is nonzero, create) the sufile block that holds
 * the usage entry of segment @segnum.
 */
static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}
/*
 * Delete the whole sufile block containing segment @segnum's usage entry
 * (used to punch a hole when an entire block becomes unused).
 */
static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}
/*
 * nilfs_sufile_mod_counter - adjust the clean/dirty segment counters in
 * the sufile header block and mark the header buffer dirty.
 *
 * The deltas are added as 64-bit two's-complement values, so callers pass
 * e.g. (u64)-1 to decrement a counter.
 */
static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}
/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 *
 * Returns the cached in-memory counter; no buffer I/O is performed.
 * NOTE(review): read without taking mi_sem — presumably callers tolerate
 * a stale snapshot or serialize externally; confirm against callers.
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}
  119. /**
  120. * nilfs_sufile_updatev - modify multiple segment usages at a time
  121. * @sufile: inode of segment usage file
  122. * @segnumv: array of segment numbers
  123. * @nsegs: size of @segnumv array
  124. * @create: creation flag
  125. * @ndone: place to store number of modified segments on @segnumv
  126. * @dofunc: primitive operation for the update
  127. *
  128. * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
  129. * against the given array of segments. The @dofunc is called with
  130. * buffers of a header block and the sufile block in which the target
  131. * segment usage entry is contained. If @ndone is given, the number
  132. * of successfully modified segments from the head is stored in the
  133. * place @ndone points to.
  134. *
  135. * Return Value: On success, zero is returned. On error, one of the
  136. * following negative error codes is returned.
  137. *
  138. * %-EIO - I/O error.
  139. *
  140. * %-ENOMEM - Insufficient amount of memory available.
  141. *
  142. * %-ENOENT - Given segment usage is in hole block (may be returned if
  143. * @create is zero)
  144. *
  145. * %-EINVAL - Invalid segment usage number
  146. */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	/*
	 * Validate every segment number up front so the update is
	 * all-or-nothing with respect to bad input.
	 */
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_msg(sufile->i_sb, KERN_WARNING,
				  "%s: invalid segment number: %llu",
				  __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	/*
	 * Walk the array reusing the current block buffer as long as
	 * consecutive segment numbers land in the same sufile block.
	 */
	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	/* number of entries processed before stopping (for @ndone) */
	n = seg - segnumv;
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);

 out:
	if (ndone)
		*ndone = n;
	return ret;
}
/*
 * nilfs_sufile_update - apply @dofunc to a single segment usage entry.
 * Single-segment counterpart of nilfs_sufile_updatev(); @dofunc receives
 * the header block buffer and the buffer holding @segnum's entry, both
 * under mi_sem held for write.
 */
int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: invalid segment number: %llu",
			  __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
  233. /**
  234. * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
  235. * @sufile: inode of segment usage file
  236. * @start: minimum segment number of allocatable region (inclusive)
  237. * @end: maximum segment number of allocatable region (inclusive)
  238. *
  239. * Return Value: On success, 0 is returned. On error, one of the
  240. * following negative error codes is returned.
  241. *
  242. * %-ERANGE - invalid segment region
  243. */
  244. int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
  245. {
  246. struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
  247. __u64 nsegs;
  248. int ret = -ERANGE;
  249. down_write(&NILFS_MDT(sufile)->mi_sem);
  250. nsegs = nilfs_sufile_get_nsegments(sufile);
  251. if (start <= end && end < nsegs) {
  252. sui->allocmin = start;
  253. sui->allocmax = end;
  254. ret = 0;
  255. }
  256. up_write(&NILFS_MDT(sufile)->mi_sem);
  257. return ret;
  258. }
  259. /**
  260. * nilfs_sufile_alloc - allocate a segment
  261. * @sufile: inode of segment usage file
  262. * @segnump: pointer to segment number
  263. *
  264. * Description: nilfs_sufile_alloc() allocates a clean segment.
  265. *
  266. * Return Value: On success, 0 is returned and the segment number of the
  267. * allocated segment is stored in the place pointed by @segnump. On error, one
  268. * of the following negative error codes is returned.
  269. *
  270. * %-EIO - I/O error.
  271. *
  272. * %-ENOMEM - Insufficient amount of memory available.
  273. *
  274. * %-ENOSPC - No clean segment left.
  275. */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	/*
	 * Start scanning just after the last allocated segment, restricted
	 * to the [allocmin, allocmax] window; fall back to allocmin when
	 * that position is outside the window.
	 */
	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	/* @cnt counts entries examined; stop after one full device sweep */
	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				/* window exhausted: try above the window */
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				/* finally try below the window */
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			/* update on-disk counters and allocation cursor */
			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/*
 * nilfs_sufile_do_cancel_free - re-mark a clean segment as dirty
 * (dofunc for nilfs_sufile_update()/updatev(); caller holds mi_sem).
 * Warns and bails out if the segment is not currently clean.
 */
void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu must be clean", __func__,
			  (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	/* one fewer clean segment, one more dirty segment */
	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/*
 * nilfs_sufile_do_scrap - turn a segment into reclaimable garbage
 * (dofunc for nilfs_sufile_update(); caller holds mi_sem).
 * The entry is reset to "dirty, zero blocks, no lastmod" and the header
 * counters are adjusted according to the segment's previous state.
 */
void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		/* already in the scrapped state; nothing to do */
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	/* was clean: clean count -1; was not dirty: dirty count +1 */
	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
/*
 * nilfs_sufile_do_free - mark a segment clean (free it)
 * (dofunc for nilfs_sufile_update()/updatev(); caller holds mi_sem).
 * Warns and bails out if the segment is already clean; warns (but
 * proceeds) on an error-flagged or non-dirty entry.
 */
void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu is already clean",
			  __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	/* clean count +1; dirty count -1 only if it really was dirty */
	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}
  438. /**
  439. * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
  440. * @sufile: inode of segment usage file
  441. * @segnum: segment number
  442. */
  443. int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
  444. {
  445. struct buffer_head *bh;
  446. int ret;
  447. ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
  448. if (!ret) {
  449. mark_buffer_dirty(bh);
  450. nilfs_mdt_mark_dirty(sufile);
  451. brelse(bh);
  452. }
  453. return ret;
  454. }
  455. /**
  456. * nilfs_sufile_set_segment_usage - set usage of a segment
  457. * @sufile: inode of segment usage file
  458. * @segnum: segment number
  459. * @nblocks: number of live blocks in the segment
  460. * @modtime: modification time (option)
  461. */
/*
 * Record the live-block count (and optionally the modification time) of
 * segment @segnum.  A @modtime of zero leaves su_lastmod untouched.
 * Returns 0 on success or a negative error from the block lookup
 * (the entry's block is not created here: create == 0).
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
  487. /**
  488. * nilfs_sufile_get_stat - get segment usage statistics
  489. * @sufile: inode of segment usage file
* @sustat: pointer to a structure of segment usage statistics
  491. *
  492. * Description: nilfs_sufile_get_stat() returns information about segment
  493. * usage.
  494. *
  495. * Return Value: On success, 0 is returned, and segment usage information is
  496. * stored in the place pointed by @stat. On error, one of the following
  497. * negative error codes is returned.
  498. *
  499. * %-EIO - I/O error.
  500. *
  501. * %-ENOMEM - Insufficient amount of memory available.
  502. */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	/* ns_prot_seq is protected by its own spinlock, not mi_sem */
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/*
 * nilfs_sufile_do_set_error - flag a segment as having an error
 * (dofunc for nilfs_sufile_update(); caller holds mi_sem).
 * Idempotent: returns immediately if the error flag is already set.
 * A clean segment that becomes erroneous is removed from the clean count.
 */
void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
  553. /**
  554. * nilfs_sufile_truncate_range - truncate range of segment array
  555. * @sufile: inode of segment usage file
  556. * @start: start segment number (inclusive)
  557. * @end: end segment number (inclusive)
  558. *
  559. * Return Value: On success, 0 is returned. On error, one of the
  560. * following negative error codes is returned.
  561. *
  562. * %-EIO - I/O error.
  563. *
  564. * %-ENOMEM - Insufficient amount of memory available.
  565. *
  566. * %-EINVAL - Invalid number of segments specified
  567. *
  568. * %-EBUSY - Dirty or active segments are present in the range
  569. */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	/* process the range block by block; @n = entries in this block */
	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		/*
		 * First pass: refuse the whole truncation if any entry is
		 * in use (any flag besides ERROR set, or segment active).
		 */
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		/* second pass: clear error entries, counting them in @nc */
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	/* account entries that went error -> clean, even on partial failure */
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}
  650. /**
  651. * nilfs_sufile_resize - resize segment array
  652. * @sufile: inode of segment usage file
  653. * @newnsegs: new number of segments
  654. *
  655. * Return Value: On success, 0 is returned. On error, one of the
  656. * following negative error codes is returned.
  657. *
  658. * %-EIO - I/O error.
  659. *
  660. * %-ENOMEM - Insufficient amount of memory available.
  661. *
  662. * %-ENOSPC - Enough free space is not left for shrinking
  663. *
  664. * %-EBUSY - Dirty or active segments exist in the region to be truncated
  665. */
  666. int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
  667. {
  668. struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
  669. struct buffer_head *header_bh;
  670. struct nilfs_sufile_header *header;
  671. struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
  672. void *kaddr;
  673. unsigned long nsegs, nrsvsegs;
  674. int ret = 0;
  675. down_write(&NILFS_MDT(sufile)->mi_sem);
  676. nsegs = nilfs_sufile_get_nsegments(sufile);
  677. if (nsegs == newnsegs)
  678. goto out;
  679. ret = -ENOSPC;
  680. nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
  681. if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
  682. goto out;
  683. ret = nilfs_sufile_get_header_block(sufile, &header_bh);
  684. if (ret < 0)
  685. goto out;
  686. if (newnsegs > nsegs) {
  687. sui->ncleansegs += newnsegs - nsegs;
  688. } else /* newnsegs < nsegs */ {
  689. ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
  690. if (ret < 0)
  691. goto out_header;
  692. sui->ncleansegs -= nsegs - newnsegs;
  693. }
  694. kaddr = kmap_atomic(header_bh->b_page);
  695. header = kaddr + bh_offset(header_bh);
  696. header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
  697. kunmap_atomic(kaddr);
  698. mark_buffer_dirty(header_bh);
  699. nilfs_mdt_mark_dirty(sufile);
  700. nilfs_set_nsegments(nilfs, newnsegs);
  701. out_header:
  702. brelse(header_bh);
  703. out:
  704. up_write(&NILFS_MDT(sufile)->mi_sem);
  705. return ret;
  706. }
  707. /**
* nilfs_sufile_get_suinfo - get segment usage information
  709. * @sufile: inode of segment usage file
  710. * @segnum: segment number to start looking
  711. * @buf: array of suinfo
  712. * @sisz: byte size of suinfo
  713. * @nsi: size of suinfo array
  714. *
* Description: reads usage information of up to @nsi segments starting at
* @segnum and stores it in @buf as an array of nilfs_suinfo structures.
*
* Return Value: On success, the number of segment usage items stored in
* @buf is returned. On error, one of the following negative error codes
* is returned.
  719. *
  720. * %-EIO - I/O error.
  721. *
  722. * %-ENOMEM - Insufficient amount of memory available.
  723. */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	/* never read past the end of the device or of the caller's array */
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
				  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole: report zeroed (clean) usage entries */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			/*
			 * The active flag is virtual: recompute it from the
			 * current segment state instead of trusting disk.
			 */
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly. Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	/* Deltas to apply to the on-disk clean/dirty segment counters */
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	/*
	 * Validate the whole input array before taking the semaphore or
	 * touching any block, so a bad entry cannot cause a partial update:
	 * segment number in range, no flag bits beyond the defined update
	 * fields, and nblocks (when being set) within a segment's capacity.
	 */
	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	/* create=1: allocate the sufile block if it does not exist yet */
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	/*
	 * Walk the (caller-sorted-by-block) update entries, keeping the
	 * current sufile block mapped and only switching buffers when an
	 * entry falls into a different block.
	 */
	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);

			/*
			 * Compare old vs. new clean/dirty state so the
			 * header counters can be adjusted by the net change.
			 */
			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	/*
	 * Apply the accumulated counter deltas even if the loop failed
	 * part-way, so the counters stay consistent with the blocks that
	 * were actually modified.
	 */
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
				(u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
/**
 * nilfs_sufile_trim_fs() - trim ioctl handle function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: First Byte to trim
 * len: number of Bytes to trim from start
 * minlen: minimum extent length in Bytes
 *
 * Description: nilfs_sufile_trim_fs goes through all segments containing bytes
 * from start to start+len. start is rounded up to the next block boundary
 * and start+len is rounded down. For each clean segment blkdev_issue_discard
 * function is invoked.
 *
 * Return Value: On success, 0 is returned or negative error code, otherwise.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	/* Current extent being accumulated (in filesystem blocks) */
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	/* Conversion factor: filesystem blocks -> device logical sectors */
	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
		bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	/* Round the start up to a whole block */
	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	/* Scan segment usages block by block over the requested range */
	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				/* clip the extent to the requested range */
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				/*
				 * Drop the atomic kmap before issuing the
				 * discard, which may sleep; re-map the page
				 * afterwards and recompute the su pointer.
				 */
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS, 0);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}


	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		/* clip the tail so the extent stays within end_block */
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS, 0);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	/* Report back how many bytes were actually discarded */
	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 *
 * Description: Looks up (or creates and initializes) the in-memory sufile
 * inode.  A freshly created inode is initialized from @raw_inode and the
 * clean-segment count is read from the sufile header block.
 *
 * Return Value: 0 on success, or a negative error code (%-EINVAL for an
 * out-of-range @susize, %-ENOMEM when the inode cannot be obtained, or an
 * error propagated from reading the inode/header block).
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	/* Sanity check the on-disk entry size before any allocation */
	if (susize > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR,
			  "too large segment usage size: %zu bytes", susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		nilfs_msg(sb, KERN_ERR,
			  "too small segment usage size: %zu bytes", susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	/* Already initialized by a previous call: just return it */
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	/* Cache the clean-segment count from the on-disk header */
	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	/* Allow allocation over the full segment range by default */
	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	/* Marks the inode bad and unlocks it so waiters see the failure */
	iget_failed(sufile);
	return err;
}