/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
        struct nilfs_mdt_info mi;
        unsigned long ncleansegs;  /* number of clean segments */
        __u64 allocmin;            /* lower limit of allocatable segment range */
        __u64 allocmax;            /* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
        return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
        return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

        do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
        return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

        return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}
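
/*
 * Worked example (a sketch with assumed geometry, not data from this
 * file): with 4 KiB blocks and 16-byte segment usage entries, each
 * sufile block holds 256 entries.  The header (three __le64 counters,
 * 24 bytes) then rounds up to an entry offset of 2, so for segment
 * 1000, t = 1002, giving block offset 1002 / 256 = 3 and in-block
 * offset 1002 % 256 = 234.  The real values follow mi_entry_size and
 * mi_first_entry_offset as set up by nilfs_mdt_set_entry_size().
 */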

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
                                     __u64 max)
{
        return min_t(unsigned long,
                     nilfs_sufile_segment_usages_per_block(sufile) -
                     nilfs_sufile_get_offset(sufile, curr),
                     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
                                     struct buffer_head *bh, void *kaddr)
{
        return kaddr + bh_offset(bh) +
                nilfs_sufile_get_offset(sufile, segnum) *
                NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
                                                struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
                                     int create, struct buffer_head **bhp)
{
        return nilfs_mdt_get_block(sufile,
                                   nilfs_sufile_get_blkoff(sufile, segnum),
                                   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
                                                   __u64 segnum)
{
        return nilfs_mdt_delete_block(sufile,
                                      nilfs_sufile_get_blkoff(sufile, segnum));
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
                                     u64 ncleanadd, u64 ndirtyadd)
{
        struct nilfs_sufile_header *header;
        void *kaddr;

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(header_bh);
}
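
/*
 * Note that @ncleanadd and @ndirtyadd are u64 deltas applied with
 * le64_add_cpu(), so callers pass -1 to decrement; e.g.
 * nilfs_sufile_do_cancel_free() below moves one segment from clean to
 * dirty with nilfs_sufile_mod_counter(header_bh, -1, 1).
 */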

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
        return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments in @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
                         int create, size_t *ndone,
                         void (*dofunc)(struct inode *, __u64,
                                        struct buffer_head *,
                                        struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        unsigned long blkoff, prev_blkoff;
        __u64 *seg;
        size_t nerr = 0, n = 0;
        int ret = 0;

        if (unlikely(nsegs == 0))
                goto out;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        for (seg = segnumv; seg < segnumv + nsegs; seg++) {
                if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
                        printk(KERN_WARNING
                               "%s: invalid segment number: %llu\n", __func__,
                               (unsigned long long)*seg);
                        nerr++;
                }
        }
        if (nerr > 0) {
                ret = -EINVAL;
                goto out_sem;
        }

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        seg = segnumv;
        blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
        ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                dofunc(sufile, *seg, header_bh, bh);

                if (++seg >= segnumv + nsegs)
                        break;
                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                brelse(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_header;
        }
        brelse(bh);

 out_header:
        n = seg - segnumv;
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);

 out:
        if (ndone)
                *ndone = n;
        return ret;
}

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
                        void (*dofunc)(struct inode *, __u64,
                                       struct buffer_head *,
                                       struct buffer_head *))
{
        struct buffer_head *header_bh, *bh;
        int ret;

        if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
                printk(KERN_WARNING "%s: invalid segment number: %llu\n",
                       __func__, (unsigned long long)segnum);
                return -EINVAL;
        }
        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
        if (!ret) {
                dofunc(sufile, segnum, header_bh, bh);
                brelse(bh);
        }
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
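
/*
 * Usage sketch: the two update helpers above are driven by passing one
 * of the nilfs_sufile_do_*() primitives below as @dofunc, e.g.
 *
 *        err = nilfs_sufile_update(sufile, segnum, 0,
 *                                  nilfs_sufile_do_cancel_free);
 *
 * (inline wrappers of this form live in sufile.h).  Each primitive
 * receives the header block and the block holding the target segment
 * usage, both already looked up under mi_sem.
 */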

/**
 * nilfs_sufile_set_alloc_range - limit range of segments to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        __u64 nsegs;
        int ret = -ERANGE;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        nsegs = nilfs_sufile_get_nsegments(sufile);

        if (start <= end && end < nsegs) {
                sui->allocmin = start;
                sui->allocmax = end;
                ret = 0;
        }
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
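
/*
 * A hedged usage sketch: shrinking paths are expected to narrow the
 * allocatable region first, e.g.
 *
 *        ret = nilfs_sufile_set_alloc_range(sufile, 0, newnsegs - 1);
 *
 * so that nilfs_sufile_alloc() stops handing out segments that are
 * about to be truncated by nilfs_sufile_resize() below.
 */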

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
        struct buffer_head *header_bh, *su_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_segment_usage *su;
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        __u64 segnum, maxsegnum, last_alloc;
        void *kaddr;
        unsigned long nsegments, nsus, cnt;
        int ret, j;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
        kunmap_atomic(kaddr);

        nsegments = nilfs_sufile_get_nsegments(sufile);
        maxsegnum = sui->allocmax;
        segnum = last_alloc + 1;
        if (segnum < sui->allocmin || segnum > sui->allocmax)
                segnum = sui->allocmin;

        for (cnt = 0; cnt < nsegments; cnt += nsus) {
                if (segnum > maxsegnum) {
                        if (cnt < sui->allocmax - sui->allocmin + 1) {
                                /*
                                 * wrap around in the limited region.
                                 * if allocation started from
                                 * sui->allocmin, this never happens.
                                 */
                                segnum = sui->allocmin;
                                maxsegnum = last_alloc;
                        } else if (segnum > sui->allocmin &&
                                   sui->allocmax + 1 < nsegments) {
                                segnum = sui->allocmax + 1;
                                maxsegnum = nsegments - 1;
                        } else if (sui->allocmin > 0) {
                                segnum = 0;
                                maxsegnum = sui->allocmin - 1;
                        } else {
                                break; /* never happens */
                        }
                }
                trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);

                nsus = nilfs_sufile_segment_usages_in_block(
                        sufile, segnum, maxsegnum);
                for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
                        kunmap_atomic(kaddr);

                        kaddr = kmap_atomic(header_bh->b_page);
                        header = kaddr + bh_offset(header_bh);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
                        kunmap_atomic(kaddr);

                        sui->ncleansegs--;
                        mark_buffer_dirty(header_bh);
                        mark_buffer_dirty(su_bh);
                        nilfs_mdt_mark_dirty(sufile);
                        brelse(su_bh);
                        *segnump = segnum;

                        trace_nilfs2_segment_usage_allocated(sufile, segnum);

                        goto out_header;
                }

                kunmap_atomic(kaddr);
                brelse(su_bh);
        }

        /* no segments left */
        ret = -ENOSPC;

 out_header:
        brelse(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
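
/*
 * Minimal caller sketch (for illustration only; error handling beyond
 * -ENOSPC elided):
 *
 *        __u64 segnum;
 *        int err = nilfs_sufile_alloc(sufile, &segnum);
 *
 *        if (!err)
 *                ... use segnum; its usage entry is now marked dirty ...
 *        else if (err == -ENOSPC)
 *                ... no clean segment left in the allocatable range ...
 */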

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
                                 struct buffer_head *header_bh,
                                 struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                printk(KERN_WARNING "%s: segment %llu must be clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
        kunmap_atomic(kaddr);

        nilfs_sufile_mod_counter(header_bh, -1, 1);
        NILFS_SUI(sufile)->ncleansegs--;

        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
                           struct buffer_head *header_bh,
                           struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int clean, dirty;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
            su->su_nblocks == cpu_to_le32(0)) {
                kunmap_atomic(kaddr);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
        dirty = nilfs_segment_usage_dirty(su);

        /* make the segment garbage */
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
        kunmap_atomic(kaddr);

        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        NILFS_SUI(sufile)->ncleansegs -= clean;

        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
                          struct buffer_head *header_bh,
                          struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int sudirty;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                printk(KERN_WARNING "%s: segment %llu is already clean\n",
                       __func__, (unsigned long long)segnum);
                kunmap_atomic(kaddr);
                return;
        }
        WARN_ON(nilfs_segment_usage_error(su));
        WARN_ON(!nilfs_segment_usage_dirty(su));

        sudirty = nilfs_segment_usage_dirty(su);
        nilfs_segment_usage_set_clean(su);
        kunmap_atomic(kaddr);
        mark_buffer_dirty(su_bh);

        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
        NILFS_SUI(sufile)->ncleansegs++;

        nilfs_mdt_mark_dirty(sufile);

        trace_nilfs2_segment_usage_freed(sufile, segnum);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
        struct buffer_head *bh;
        int ret;

        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (!ret) {
                mark_buffer_dirty(bh);
                nilfs_mdt_mark_dirty(sufile);
                brelse(bh);
        }
        return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
                                   unsigned long nblocks, time_t modtime)
{
        struct buffer_head *bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        int ret;

        down_write(&NILFS_MDT(sufile)->mi_sem);
        ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        WARN_ON(nilfs_segment_usage_error(su));
        if (modtime)
                su->su_lastmod = cpu_to_le64(modtime);
        su->su_nblocks = cpu_to_le32(nblocks);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(sufile);
        brelse(bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        void *kaddr;
        int ret;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
        sustat->ss_ctime = nilfs->ns_ctime;
        sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
        kunmap_atomic(kaddr);
        brelse(header_bh);

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
                               struct buffer_head *header_bh,
                               struct buffer_head *su_bh)
{
        struct nilfs_segment_usage *su;
        void *kaddr;
        int suclean;

        kaddr = kmap_atomic(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
                kunmap_atomic(kaddr);
                return;
        }
        suclean = nilfs_segment_usage_clean(su);
        nilfs_segment_usage_set_error(su);
        kunmap_atomic(kaddr);

        if (suclean) {
                nilfs_sufile_mod_counter(header_bh, -1, 0);
                NILFS_SUI(sufile)->ncleansegs--;
        }
        mark_buffer_dirty(su_bh);
        nilfs_mdt_mark_dirty(sufile);
}

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
                                       __u64 start, __u64 end)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh;
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su, *su2;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        unsigned long segusages_per_block;
        unsigned long nsegs, ncleaned;
        __u64 segnum;
        void *kaddr;
        ssize_t n, nc;
        int ret;
        int j;

        nsegs = nilfs_sufile_get_nsegments(sufile);

        ret = -EINVAL;
        if (start > end || start >= nsegs)
                goto out;

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out;

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        ncleaned = 0;

        for (segnum = start; segnum <= end; segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                          nilfs_sufile_get_offset(sufile, segnum),
                          end - segnum + 1);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out_header;
                        /* hole */
                        continue;
                }
                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                su2 = su;
                for (j = 0; j < n; j++, su = (void *)su + susz) {
                        if ((le32_to_cpu(su->su_flags) &
                             ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
                            nilfs_segment_is_active(nilfs, segnum + j)) {
                                ret = -EBUSY;
                                kunmap_atomic(kaddr);
                                brelse(su_bh);
                                goto out_header;
                        }
                }
                nc = 0;
                for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
                        if (nilfs_segment_usage_error(su)) {
                                nilfs_segment_usage_set_clean(su);
                                nc++;
                        }
                }
                kunmap_atomic(kaddr);
                if (nc > 0) {
                        mark_buffer_dirty(su_bh);
                        ncleaned += nc;
                }
                brelse(su_bh);

                if (n == segusages_per_block) {
                        /* make hole */
                        nilfs_sufile_delete_segment_usage_block(sufile, segnum);
                }
        }
        ret = 0;

 out_header:
        if (ncleaned > 0) {
                NILFS_SUI(sufile)->ncleansegs += ncleaned;
                nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
                nilfs_mdt_mark_dirty(sufile);
        }
        brelse(header_bh);

 out:
        return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
        void *kaddr;
        unsigned long nsegs, nrsvsegs;
        int ret = 0;

        down_write(&NILFS_MDT(sufile)->mi_sem);

        nsegs = nilfs_sufile_get_nsegments(sufile);
        if (nsegs == newnsegs)
                goto out;

        ret = -ENOSPC;
        nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
        if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
                goto out;

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out;

        if (newnsegs > nsegs) {
                sui->ncleansegs += newnsegs - nsegs;
        } else /* newnsegs < nsegs */ {
                ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
                if (ret < 0)
                        goto out_header;

                sui->ncleansegs -= nsegs - newnsegs;
        }

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
        kunmap_atomic(kaddr);

        mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(sufile);
        nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
        brelse(header_bh);

 out:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() reads usage information for up
 * to @nsi segments, starting at @segnum, into the array @buf.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes
 * is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                                unsigned int sisz, size_t nsi)
{
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        struct nilfs_suinfo *si = buf;
        size_t susz = NILFS_MDT(sufile)->mi_entry_size;
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        void *kaddr;
        unsigned long nsegs, segusages_per_block;
        ssize_t n;
        int ret, i, j;

        down_read(&NILFS_MDT(sufile)->mi_sem);

        segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
        nsegs = min_t(unsigned long,
                      nilfs_sufile_get_nsegments(sufile) - segnum,
                      nsi);
        for (i = 0; i < nsegs; i += n, segnum += n) {
                n = min_t(unsigned long,
                          segusages_per_block -
                          nilfs_sufile_get_offset(sufile, segnum),
                          nsegs - i);
                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out;
                        /* hole */
                        memset(si, 0, sisz * n);
                        si = (void *)si + sisz * n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n;
                     j++, su = (void *)su + susz, si = (void *)si + sisz) {
                        si->sui_lastmod = le64_to_cpu(su->su_lastmod);
                        si->sui_nblocks = le32_to_cpu(su->su_nblocks);
                        si->sui_flags = le32_to_cpu(su->su_flags) &
                                ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                        if (nilfs_segment_is_active(nilfs, segnum + j))
                                si->sui_flags |=
                                        (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
                }
                kunmap_atomic(kaddr);
                brelse(su_bh);
        }
        ret = nsegs;

 out:
        up_read(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
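
/*
 * Caller sketch (buffer size chosen arbitrarily for illustration):
 * @sisz lets callers pass records larger than struct nilfs_suinfo, and
 * a positive return value is the number of entries actually filled:
 *
 *        struct nilfs_suinfo si[16];
 *        ssize_t n = nilfs_sufile_get_suinfo(sufile, segnum, si,
 *                                            sizeof(si[0]), ARRAY_SIZE(si));
 *
 *        if (n > 0)
 *                segnum += n;        // continue scanning from here
 */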

/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
                                unsigned int supsz, size_t nsup)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *header_bh, *bh;
        struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
        struct nilfs_segment_usage *su;
        void *kaddr;
        unsigned long blkoff, prev_blkoff;
        int cleansi, cleansu, dirtysi, dirtysu;
        long ncleaned = 0, ndirtied = 0;
        int ret = 0;

        if (unlikely(nsup == 0))
                return ret;

        for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
                if (sup->sup_segnum >= nilfs->ns_nsegments
                        || (sup->sup_flags &
                                (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
                        || (nilfs_suinfo_update_nblocks(sup) &&
                                sup->sup_sui.sui_nblocks >
                                nilfs->ns_blocks_per_segment))
                        return -EINVAL;
        }

        down_write(&NILFS_MDT(sufile)->mi_sem);

        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;

        sup = buf;
        blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
        ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
        if (ret < 0)
                goto out_header;

        for (;;) {
                kaddr = kmap_atomic(bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, sup->sup_segnum, bh, kaddr);

                if (nilfs_suinfo_update_lastmod(sup))
                        su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

                if (nilfs_suinfo_update_nblocks(sup))
                        su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

                if (nilfs_suinfo_update_flags(sup)) {
                        /*
                         * Active flag is a virtual flag projected by running
                         * nilfs kernel code - drop it not to write it to
                         * disk.
                         */
                        sup->sup_sui.sui_flags &=
                                        ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);

                        cleansi = nilfs_suinfo_clean(&sup->sup_sui);
                        cleansu = nilfs_segment_usage_clean(su);
                        dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
                        dirtysu = nilfs_segment_usage_dirty(su);

                        if (cleansi && !cleansu)
                                ++ncleaned;
                        else if (!cleansi && cleansu)
                                --ncleaned;

                        if (dirtysi && !dirtysu)
                                ++ndirtied;
                        else if (!dirtysi && dirtysu)
                                --ndirtied;

                        su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
                }

                kunmap_atomic(kaddr);

                sup = (void *)sup + supsz;
                if (sup >= supend)
                        break;

                prev_blkoff = blkoff;
                blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
                if (blkoff == prev_blkoff)
                        continue;

                /* get different block */
                mark_buffer_dirty(bh);
                put_bh(bh);
                ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
                if (unlikely(ret < 0))
                        goto out_mark;
        }
        mark_buffer_dirty(bh);
        put_bh(bh);

 out_mark:
        if (ncleaned || ndirtied) {
                nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
                                (u64)ndirtied);
                NILFS_SUI(sufile)->ncleansegs += ncleaned;
        }
        nilfs_mdt_mark_dirty(sufile);

 out_header:
        put_bh(header_bh);

 out_sem:
        up_write(&NILFS_MDT(sufile)->mi_sem);
        return ret;
}
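
/*
 * Caller sketch (assumes the NILFS_SUINFO_UPDATE_NBLOCKS field flag
 * from linux/nilfs2_fs.h; shown only to illustrate the calling
 * convention):
 *
 *        struct nilfs_suinfo_update sup = {
 *                .sup_segnum = segnum,
 *                .sup_flags = 1UL << NILFS_SUINFO_UPDATE_NBLOCKS,
 *        };
 *
 *        sup.sup_sui.sui_nblocks = nblocks;
 *        ret = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 */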

/**
 * nilfs_sufile_trim_fs() - trim ioctl handle function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: first byte to trim
 * len: number of bytes to trim from start
 * minlen: minimum extent length in bytes
 *
 * Description: nilfs_sufile_trim_fs goes through all segments containing bytes
 * from start to start+len.  start is rounded up to the next block boundary
 * and start+len is rounded down.  For each clean segment, the
 * blkdev_issue_discard function is invoked.
 *
 * Return Value: On success, 0 is returned; otherwise, a negative error code
 * is returned.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
        struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
        struct buffer_head *su_bh;
        struct nilfs_segment_usage *su;
        void *kaddr;
        size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
        sector_t seg_start, seg_end, start_block, end_block;
        sector_t start = 0, nblocks = 0;
        u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
        int ret = 0;
        unsigned int sects_per_block;

        sects_per_block = (1 << nilfs->ns_blocksize_bits) /
                        bdev_logical_block_size(nilfs->ns_bdev);
        len = range->len >> nilfs->ns_blocksize_bits;
        minlen = range->minlen >> nilfs->ns_blocksize_bits;
        max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

        if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
                return -EINVAL;

        start_block = (range->start + nilfs->ns_blocksize - 1) >>
                        nilfs->ns_blocksize_bits;

        /*
         * range->len can be very large (actually, it is set to
         * ULLONG_MAX by default) - truncate upper end of the range
         * carefully so as not to overflow.
         */
        if (max_blocks - start_block < len)
                end_block = max_blocks - 1;
        else
                end_block = start_block + len - 1;

        segnum = nilfs_get_segnum_of_block(nilfs, start_block);
        segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

        down_read(&NILFS_MDT(sufile)->mi_sem);

        while (segnum <= segnum_end) {
                n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
                                segnum_end);

                ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
                                                           &su_bh);
                if (ret < 0) {
                        if (ret != -ENOENT)
                                goto out_sem;
                        /* hole */
                        segnum += n;
                        continue;
                }

                kaddr = kmap_atomic(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
                                su_bh, kaddr);
                for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
                        if (!nilfs_segment_usage_clean(su))
                                continue;

                        nilfs_get_segment_range(nilfs, segnum, &seg_start,
                                                &seg_end);

                        if (!nblocks) {
                                /* start new extent */
                                start = seg_start;
                                nblocks = seg_end - seg_start + 1;
                                continue;
                        }

                        if (start + nblocks == seg_start) {
                                /* add to previous extent */
                                nblocks += seg_end - seg_start + 1;
                                continue;
                        }

                        /* discard previous extent */
                        if (start < start_block) {
                                nblocks -= start_block - start;
                                start = start_block;
                        }

                        if (nblocks >= minlen) {
                                kunmap_atomic(kaddr);

                                ret = blkdev_issue_discard(nilfs->ns_bdev,
                                                start * sects_per_block,
                                                nblocks * sects_per_block,
                                                GFP_NOFS, 0);
                                if (ret < 0) {
                                        put_bh(su_bh);
                                        goto out_sem;
                                }

                                ndiscarded += nblocks;
                                kaddr = kmap_atomic(su_bh->b_page);
                                su = nilfs_sufile_block_get_segment_usage(
                                        sufile, segnum, su_bh, kaddr);
                        }

                        /* start new extent */
                        start = seg_start;
                        nblocks = seg_end - seg_start + 1;
                }
                kunmap_atomic(kaddr);
                put_bh(su_bh);
        }

        if (nblocks) {
                /* discard last extent */
                if (start < start_block) {
                        nblocks -= start_block - start;
                        start = start_block;
                }
                if (start + nblocks > end_block + 1)
                        nblocks = end_block - start + 1;

                if (nblocks >= minlen) {
                        ret = blkdev_issue_discard(nilfs->ns_bdev,
                                        start * sects_per_block,
                                        nblocks * sects_per_block,
                                        GFP_NOFS, 0);
                        if (!ret)
                                ndiscarded += nblocks;
                }
        }

 out_sem:
        up_read(&NILFS_MDT(sufile)->mi_sem);

        range->len = ndiscarded << nilfs->ns_blocksize_bits;
        return ret;
}
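
/*
 * Worked example (assumed geometry, for illustration): with a 4 KiB
 * filesystem block and 512-byte logical device blocks, sects_per_block
 * is 8, so a clean extent of 2048 blocks starting at block 4096 is
 * discarded as 16384 sectors starting at sector 32768.  range->start
 * and range->len are in bytes, which is why both are shifted by
 * ns_blocksize_bits before the segment scan.
 */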

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
                      struct nilfs_inode *raw_inode, struct inode **inodep)
{
        struct inode *sufile;
        struct nilfs_sufile_info *sui;
        struct buffer_head *header_bh;
        struct nilfs_sufile_header *header;
        void *kaddr;
        int err;

        if (susize > sb->s_blocksize) {
                printk(KERN_ERR
                       "NILFS: too large segment usage size: %zu bytes.\n",
                       susize);
                return -EINVAL;
        } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
                printk(KERN_ERR
                       "NILFS: too small segment usage size: %zu bytes.\n",
                       susize);
                return -EINVAL;
        }

        sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
        if (unlikely(!sufile))
                return -ENOMEM;
        if (!(sufile->i_state & I_NEW))
                goto out;

        err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
        if (err)
                goto failed;

        nilfs_mdt_set_entry_size(sufile, susize,
                                 sizeof(struct nilfs_sufile_header));

        err = nilfs_read_inode_common(sufile, raw_inode);
        if (err)
                goto failed;

        err = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (err)
                goto failed;

        sui = NILFS_SUI(sufile);
        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
        kunmap_atomic(kaddr);
        brelse(header_bh);

        sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
        sui->allocmin = 0;

        unlock_new_inode(sufile);
 out:
        *inodep = sufile;
        return 0;
 failed:
        iget_failed(sufile);
        return err;
}