super.c

/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/quotaops.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"
#include "jfs_xattr.h"

MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache *jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static const struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");
static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif

static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
			sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem as read-only\n",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}

void jfs_error(struct super_block *sb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("ERROR: (device %s): %pf: %pV\n",
	       sb->s_id, __builtin_return_address(0), &vaf);

	va_end(args);

	jfs_handle_error(sb);
}
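
/*
 * Illustrative only (no such call appears in this file): metadata code
 * elsewhere in JFS reports corruption along the lines of
 *
 *	jfs_error(sb, "invalid block number %lld\n", (long long)blkno);
 *
 * after which jfs_handle_error() above continues, remounts read-only or
 * panics, depending on the errors= mount option.
 */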

static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
#ifdef CONFIG_QUOTA
	memset(&jfs_inode->i_dquot, 0, sizeof(jfs_inode->i_dquot));
#endif
	return &jfs_inode->vfs_inode;
}

static void jfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct jfs_inode_info *ji = JFS_IP(inode);
	kmem_cache_free(jfs_inode_cachep, ji);
}

static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);
	call_rcu(&inode->i_rcu, jfs_i_callback);
}

static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
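	/*
	 * Rough upper bound: inodes already allocated, plus one inode
	 * extent's worth of inodes (1 << L2INOSPEREXT) for each
	 * inode-extent-sized chunk of free blocks, capped at what a
	 * 32-bit statfs field can report.
	 */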
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));
	buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
	buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
					   sizeof(sbi->uuid)/2);

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}

static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");

	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);

	unload_nls(sbi->nls_tab);

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);

	kfree(sbi);
}

enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
	Opt_discard, Opt_nodiscard, Opt_discard_minblk
};

static const match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_discard_minblk, "discard=%u"},
	{Opt_err, NULL}
};
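
/*
 * Illustrative example of how the table above is consumed (not part of the
 * original source): a mount such as
 *
 *	mount -t jfs -o errors=remount-ro,iocharset=utf8,discard=64 /dev/sdb1 /mnt
 *
 * hands parse_options() the string "errors=remount-ro,iocharset=utf8,discard=64",
 * where "discard=64" matches Opt_discard_minblk while a bare "discard" would
 * match Opt_discard.
 */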

static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change;  NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			/* Don't do anything ;-) */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					pr_err("JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			int rc = kstrtoll(resize, 0, newLVSize);

			if (rc)
				goto cleanup;
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				pr_err("JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				pr_err("JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			pr_err("JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			uid_t val;
			int rc = kstrtouint(uid, 0, &val);

			if (rc)
				goto cleanup;
			sbi->uid = make_kuid(current_user_ns(), val);
			if (!uid_valid(sbi->uid))
				goto cleanup;
			break;
		}

		case Opt_gid:
		{
			char *gid = args[0].from;
			gid_t val;
			int rc = kstrtouint(gid, 0, &val);

			if (rc)
				goto cleanup;
			sbi->gid = make_kgid(current_user_ns(), val);
			if (!gid_valid(sbi->gid))
				goto cleanup;
			break;
		}

		case Opt_umask:
		{
			char *umask = args[0].from;
			int rc = kstrtouint(umask, 8, &sbi->umask);

			if (rc)
				goto cleanup;
			if (sbi->umask & ~0777) {
				pr_err("JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}

		case Opt_discard:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			/* if set to 1, even copying files will cause
			 * trimming :O
			 * -> user has more control over the online trimming
			 */
			sbi->minblks_trim = 64;
			if (blk_queue_discard(q))
				*flag |= JFS_DISCARD;
			else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		case Opt_nodiscard:
			*flag &= ~JFS_DISCARD;
			break;

		case Opt_discard_minblk:
		{
			struct request_queue *q = bdev_get_queue(sb->s_bdev);
			char *minblks_trim = args[0].from;
			int rc;
			if (blk_queue_discard(q)) {
				*flag |= JFS_DISCARD;
				rc = kstrtouint(minblks_trim, 0,
						&sbi->minblks_trim);
				if (rc)
					goto cleanup;
			} else
				pr_err("JFS: discard option not supported on device\n");
			break;
		}

		default:
			printk("jfs: Unrecognized mount option \"%s\" or missing value\n",
			       p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}
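
/*
 * Remount handles four transitions: an explicit resize request
 * (newLVSize != 0), read-only to read-write (re-initialize the log and
 * resume quotas), read-write to read-only (suspend quotas and shut the
 * log down), and a change of the (no)integrity flag on a read-write
 * mount, which cycles the log by unmounting and remounting it.
 */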
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;
	int ret;

	sync_filesystem(sb);
	if (!parse_options(data, sb, &newLVSize, &flag))
		return -EINVAL;

	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			pr_err("JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		ret = jfs_mount_rw(sb, 1);

		/* mark the fs r/w for quota activity */
		sb->s_flags &= ~MS_RDONLY;

		dquot_resume(sb, -1);
		return ret;
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = dquot_suspend(sb, -1);
		if (rc < 0)
			return rc;
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;

			JFS_SBI(sb)->flag = flag;
			ret = jfs_mount_rw(sb, 1);
			return ret;
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}

static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag, ret = -EINVAL;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	if (!new_valid_dev(sb->s_bdev->bd_dev))
		return -EOVERFLOW;

	sbi = kzalloc(sizeof(struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sb->s_max_links = JFS_LINK_MAX;
	sbi->sb = sb;
	sbi->uid = INVALID_UID;
	sbi->gid = INVALID_GID;
	sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag))
		goto out_kfree;
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		pr_err("resize option for remount only\n");
		goto out_kfree;
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;
	sb->s_xattr = jfs_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->dq_op = &dquot_operations;
	sb->s_qcop = &dquot_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
#endif

	/*
	 * Initialize direct-mapping inode/address-space
	 */
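	/*
	 * Note: this private, unhashed inode spans the whole block device
	 * (i_size is set to the device size below) so that metadata
	 * addressed by raw disk block can be cached as metapages through
	 * jfs_metapage_aops.
	 */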
	inode = new_inode(sb);
	if (inode == NULL) {
		ret = -ENOMEM;
		goto out_unload;
	}
	inode->i_ino = 0;
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	hlist_add_fake(&inode->i_hash);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent)
			jfs_err("jfs_mount failed w/return code = %d", rc);
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	if (sbi->mntflag & JFS_OS2)
		sb->s_d_op = &jfs_ci_dentry_operations;

	inode = jfs_iget(sb, ROOT_I);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto out_no_rw;
	}
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1,
			     (u64)sb->s_maxbytes);
#endif
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root dentry failed");

out_no_rw:
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_unload:
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
out_kfree:
	kfree(sbi);
	return ret;
}

static int jfs_freeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		rc = lmLogShutdown(log);
		if (rc) {
			jfs_error(sb, "lmLogShutdown failed\n");

			/* let operations fail rather than hang */
			txResume(sb);

			return rc;
		}
		rc = updateSuper(sb, FM_CLEAN);
		if (rc) {
			jfs_err("jfs_freeze: updateSuper failed\n");
			/*
			 * Don't fail here. Everything succeeded except
			 * marking the superblock clean, so there's really
			 * no harm in leaving it frozen for now.
			 */
		}
	}
	return 0;
}

static int jfs_unfreeze(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		rc = updateSuper(sb, FM_MOUNT);
		if (rc) {
			jfs_error(sb, "updateSuper failed\n");
			goto out;
		}
		rc = lmLogInit(log);
		if (rc)
			jfs_error(sb, "lmLogInit failed\n");
out:
		txResume(sb);
	}
	return rc;
}

static struct dentry *jfs_do_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, jfs_fill_super);
}

static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		/*
		 * Write quota structures to quota file, sync_blockdev() will
		 * write them to disk later
		 */
		dquot_writeback_dquots(sb, -1);
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}

	return 0;
}

static int jfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct jfs_sb_info *sbi = JFS_SBI(root->d_sb);

	if (uid_valid(sbi->uid))
		seq_printf(seq, ",uid=%d", from_kuid(&init_user_ns, sbi->uid));
	if (gid_valid(sbi->gid))
		seq_printf(seq, ",gid=%d", from_kgid(&init_user_ns, sbi->gid));
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");
	if (sbi->flag & JFS_DISCARD)
		seq_printf(seq, ",discard=%u", sbi->minblks_trim);
	if (sbi->nls_tab)
		seq_printf(seq, ",iocharset=%s", sbi->nls_tab->charset);
	if (sbi->flag & JFS_ERR_CONTINUE)
		seq_printf(seq, ",errors=continue");
	if (sbi->flag & JFS_ERR_PANIC)
		seq_printf(seq, ",errors=panic");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");
	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

	return 0;
}

#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock(&inode->i_mutex);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}

static struct dquot **jfs_get_dquots(struct inode *inode)
{
	return JFS_IP(inode)->i_dquot;
}
#endif

static const struct super_operations jfs_super_operations = {
	.alloc_inode = jfs_alloc_inode,
	.destroy_inode = jfs_destroy_inode,
	.dirty_inode = jfs_dirty_inode,
	.write_inode = jfs_write_inode,
	.evict_inode = jfs_evict_inode,
	.put_super = jfs_put_super,
	.sync_fs = jfs_sync_fs,
	.freeze_fs = jfs_freeze,
	.unfreeze_fs = jfs_unfreeze,
	.statfs = jfs_statfs,
	.remount_fs = jfs_remount,
	.show_options = jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read = jfs_quota_read,
	.quota_write = jfs_quota_write,
	.get_dquots = jfs_get_dquots,
#endif
};

static const struct export_operations jfs_export_operations = {
	.fh_to_dentry = jfs_fh_to_dentry,
	.fh_to_parent = jfs_fh_to_parent,
	.get_parent = jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner = THIS_MODULE,
	.name = "jfs",
	.mount = jfs_do_mount,
	.kill_sb = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("jfs");
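
/*
 * Slab constructor: runs once when an object is first created for the
 * jfs_ip cache, not on every allocation, so only state that may safely
 * persist across inode reuse is initialized here.
 */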
static void init_once(void *foo)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
	inode_init_once(&jfs_ip->vfs_inode);
}

static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			      init_once);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL,
						 "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	rc = register_filesystem(&jfs_fs_type);
	if (!rc)
		return 0;

#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	kthread_stop(jfsSyncThread);
kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}

static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)