/* fs/qnx4/inode.c */
/*
 * QNX4 file system, Linux implementation.
 *
 * Version : 0.2.1
 *
 * Using parts of the xiafs filesystem.
 *
 * History :
 *
 * 01-06-1998 by Richard Frowijn : first release.
 * 20-06-1998 by Frank Denis : Linux 2.1.99+ support, boot signature, misc.
 * 30-06-1998 by Frank Denis : first step to write inodes.
 */
  14. #include <linux/module.h>
  15. #include <linux/init.h>
  16. #include <linux/slab.h>
  17. #include <linux/highuid.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/buffer_head.h>
  20. #include <linux/writeback.h>
  21. #include <linux/statfs.h>
  22. #include "qnx4.h"
  23. #define QNX4_VERSION 4
  24. #define QNX4_BMNAME ".bitmap"
  25. static const struct super_operations qnx4_sops;
  26. static struct inode *qnx4_alloc_inode(struct super_block *sb);
  27. static void qnx4_destroy_inode(struct inode *inode);
  28. static int qnx4_remount(struct super_block *sb, int *flags, char *data);
  29. static int qnx4_statfs(struct dentry *, struct kstatfs *);
  30. static const struct super_operations qnx4_sops =
  31. {
  32. .alloc_inode = qnx4_alloc_inode,
  33. .destroy_inode = qnx4_destroy_inode,
  34. .statfs = qnx4_statfs,
  35. .remount_fs = qnx4_remount,
  36. };
  37. static int qnx4_remount(struct super_block *sb, int *flags, char *data)
  38. {
  39. struct qnx4_sb_info *qs;
  40. qs = qnx4_sb(sb);
  41. qs->Version = QNX4_VERSION;
  42. *flags |= MS_RDONLY;
  43. return 0;
  44. }
  45. static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create )
  46. {
  47. unsigned long phys;
  48. QNX4DEBUG((KERN_INFO "qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));
  49. phys = qnx4_block_map( inode, iblock );
  50. if ( phys ) {
  51. // logical block is before EOF
  52. map_bh(bh, inode->i_sb, phys);
  53. }
  54. return 0;
  55. }
  56. static inline u32 try_extent(qnx4_xtnt_t *extent, u32 *offset)
  57. {
  58. u32 size = le32_to_cpu(extent->xtnt_size);
  59. if (*offset < size)
  60. return le32_to_cpu(extent->xtnt_blk) + *offset - 1;
  61. *offset -= size;
  62. return 0;
  63. }
  64. unsigned long qnx4_block_map( struct inode *inode, long iblock )
  65. {
  66. int ix;
  67. long i_xblk;
  68. struct buffer_head *bh = NULL;
  69. struct qnx4_xblk *xblk = NULL;
  70. struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode);
  71. u16 nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts);
  72. u32 offset = iblock;
  73. u32 block = try_extent(&qnx4_inode->di_first_xtnt, &offset);
  74. if (block) {
  75. // iblock is in the first extent. This is easy.
  76. } else {
  77. // iblock is beyond first extent. We have to follow the extent chain.
  78. i_xblk = le32_to_cpu(qnx4_inode->di_xblk);
  79. ix = 0;
  80. while ( --nxtnt > 0 ) {
  81. if ( ix == 0 ) {
  82. // read next xtnt block.
  83. bh = sb_bread(inode->i_sb, i_xblk - 1);
  84. if ( !bh ) {
  85. QNX4DEBUG((KERN_ERR "qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
  86. return -EIO;
  87. }
  88. xblk = (struct qnx4_xblk*)bh->b_data;
  89. if ( memcmp( xblk->xblk_signature, "IamXblk", 7 ) ) {
  90. QNX4DEBUG((KERN_ERR "qnx4: block at %ld is not a valid xtnt\n", qnx4_inode->i_xblk));
  91. return -EIO;
  92. }
  93. }
  94. block = try_extent(&xblk->xblk_xtnts[ix], &offset);
  95. if (block) {
  96. // got it!
  97. break;
  98. }
  99. if ( ++ix >= xblk->xblk_num_xtnts ) {
  100. i_xblk = le32_to_cpu(xblk->xblk_next_xblk);
  101. ix = 0;
  102. brelse( bh );
  103. bh = NULL;
  104. }
  105. }
  106. if ( bh )
  107. brelse( bh );
  108. }
  109. QNX4DEBUG((KERN_INFO "qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
  110. return block;
  111. }
  112. static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf)
  113. {
  114. struct super_block *sb = dentry->d_sb;
  115. u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
  116. buf->f_type = sb->s_magic;
  117. buf->f_bsize = sb->s_blocksize;
  118. buf->f_blocks = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size) * 8;
  119. buf->f_bfree = qnx4_count_free_blocks(sb);
  120. buf->f_bavail = buf->f_bfree;
  121. buf->f_namelen = QNX4_NAME_MAX;
  122. buf->f_fsid.val[0] = (u32)id;
  123. buf->f_fsid.val[1] = (u32)(id >> 32);
  124. return 0;
  125. }
  126. /*
  127. * Check the root directory of the filesystem to make sure
  128. * it really _is_ a qnx4 filesystem, and to check the size
  129. * of the directory entry.
  130. */
  131. static const char *qnx4_checkroot(struct super_block *sb,
  132. struct qnx4_super_block *s)
  133. {
  134. struct buffer_head *bh;
  135. struct qnx4_inode_entry *rootdir;
  136. int rd, rl;
  137. int i, j;
  138. if (s->RootDir.di_fname[0] != '/' || s->RootDir.di_fname[1] != '\0')
  139. return "no qnx4 filesystem (no root dir).";
  140. QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
  141. rd = le32_to_cpu(s->RootDir.di_first_xtnt.xtnt_blk) - 1;
  142. rl = le32_to_cpu(s->RootDir.di_first_xtnt.xtnt_size);
  143. for (j = 0; j < rl; j++) {
  144. bh = sb_bread(sb, rd + j); /* root dir, first block */
  145. if (bh == NULL)
  146. return "unable to read root entry.";
  147. rootdir = (struct qnx4_inode_entry *) bh->b_data;
  148. for (i = 0; i < QNX4_INODES_PER_BLOCK; i++, rootdir++) {
  149. QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
  150. if (strcmp(rootdir->di_fname, QNX4_BMNAME) != 0)
  151. continue;
  152. qnx4_sb(sb)->BitMap = kmemdup(rootdir,
  153. sizeof(struct qnx4_inode_entry),
  154. GFP_KERNEL);
  155. brelse(bh);
  156. if (!qnx4_sb(sb)->BitMap)
  157. return "not enough memory for bitmap inode";
  158. /* keep bitmap inode known */
  159. return NULL;
  160. }
  161. brelse(bh);
  162. }
  163. return "bitmap file not found.";
  164. }
  165. static int qnx4_fill_super(struct super_block *s, void *data, int silent)
  166. {
  167. struct buffer_head *bh;
  168. struct inode *root;
  169. const char *errmsg;
  170. struct qnx4_sb_info *qs;
  171. qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
  172. if (!qs)
  173. return -ENOMEM;
  174. s->s_fs_info = qs;
  175. sb_set_blocksize(s, QNX4_BLOCK_SIZE);
  176. s->s_op = &qnx4_sops;
  177. s->s_magic = QNX4_SUPER_MAGIC;
  178. s->s_flags |= MS_RDONLY; /* Yup, read-only yet */
  179. /* Check the superblock signature. Since the qnx4 code is
  180. dangerous, we should leave as quickly as possible
  181. if we don't belong here... */
  182. bh = sb_bread(s, 1);
  183. if (!bh) {
  184. printk(KERN_ERR "qnx4: unable to read the superblock\n");
  185. return -EINVAL;
  186. }
  187. /* check before allocating dentries, inodes, .. */
  188. errmsg = qnx4_checkroot(s, (struct qnx4_super_block *) bh->b_data);
  189. brelse(bh);
  190. if (errmsg != NULL) {
  191. if (!silent)
  192. printk(KERN_ERR "qnx4: %s\n", errmsg);
  193. return -EINVAL;
  194. }
  195. /* does root not have inode number QNX4_ROOT_INO ?? */
  196. root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
  197. if (IS_ERR(root)) {
  198. printk(KERN_ERR "qnx4: get inode failed\n");
  199. return PTR_ERR(root);
  200. }
  201. s->s_root = d_make_root(root);
  202. if (s->s_root == NULL)
  203. return -ENOMEM;
  204. return 0;
  205. }
  206. static void qnx4_kill_sb(struct super_block *sb)
  207. {
  208. struct qnx4_sb_info *qs = qnx4_sb(sb);
  209. kill_block_super(sb);
  210. if (qs) {
  211. kfree(qs->BitMap);
  212. kfree(qs);
  213. }
  214. }
  215. static int qnx4_readpage(struct file *file, struct page *page)
  216. {
  217. return block_read_full_page(page,qnx4_get_block);
  218. }
  219. static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
  220. {
  221. return generic_block_bmap(mapping,block,qnx4_get_block);
  222. }
  223. static const struct address_space_operations qnx4_aops = {
  224. .readpage = qnx4_readpage,
  225. .bmap = qnx4_bmap
  226. };
  227. struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
  228. {
  229. struct buffer_head *bh;
  230. struct qnx4_inode_entry *raw_inode;
  231. int block;
  232. struct qnx4_inode_entry *qnx4_inode;
  233. struct inode *inode;
  234. inode = iget_locked(sb, ino);
  235. if (!inode)
  236. return ERR_PTR(-ENOMEM);
  237. if (!(inode->i_state & I_NEW))
  238. return inode;
  239. qnx4_inode = qnx4_raw_inode(inode);
  240. inode->i_mode = 0;
  241. QNX4DEBUG((KERN_INFO "reading inode : [%d]\n", ino));
  242. if (!ino) {
  243. printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is "
  244. "out of range\n",
  245. sb->s_id, ino);
  246. iget_failed(inode);
  247. return ERR_PTR(-EIO);
  248. }
  249. block = ino / QNX4_INODES_PER_BLOCK;
  250. if (!(bh = sb_bread(sb, block))) {
  251. printk(KERN_ERR "qnx4: major problem: unable to read inode from dev "
  252. "%s\n", sb->s_id);
  253. iget_failed(inode);
  254. return ERR_PTR(-EIO);
  255. }
  256. raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
  257. (ino % QNX4_INODES_PER_BLOCK);
  258. inode->i_mode = le16_to_cpu(raw_inode->di_mode);
  259. i_uid_write(inode, (uid_t)le16_to_cpu(raw_inode->di_uid));
  260. i_gid_write(inode, (gid_t)le16_to_cpu(raw_inode->di_gid));
  261. set_nlink(inode, le16_to_cpu(raw_inode->di_nlink));
  262. inode->i_size = le32_to_cpu(raw_inode->di_size);
  263. inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->di_mtime);
  264. inode->i_mtime.tv_nsec = 0;
  265. inode->i_atime.tv_sec = le32_to_cpu(raw_inode->di_atime);
  266. inode->i_atime.tv_nsec = 0;
  267. inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->di_ctime);
  268. inode->i_ctime.tv_nsec = 0;
  269. inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);
  270. memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
  271. if (S_ISREG(inode->i_mode)) {
  272. inode->i_fop = &generic_ro_fops;
  273. inode->i_mapping->a_ops = &qnx4_aops;
  274. qnx4_i(inode)->mmu_private = inode->i_size;
  275. } else if (S_ISDIR(inode->i_mode)) {
  276. inode->i_op = &qnx4_dir_inode_operations;
  277. inode->i_fop = &qnx4_dir_operations;
  278. } else if (S_ISLNK(inode->i_mode)) {
  279. inode->i_op = &page_symlink_inode_operations;
  280. inode->i_mapping->a_ops = &qnx4_aops;
  281. qnx4_i(inode)->mmu_private = inode->i_size;
  282. } else {
  283. printk(KERN_ERR "qnx4: bad inode %lu on dev %s\n",
  284. ino, sb->s_id);
  285. iget_failed(inode);
  286. brelse(bh);
  287. return ERR_PTR(-EIO);
  288. }
  289. brelse(bh);
  290. unlock_new_inode(inode);
  291. return inode;
  292. }
  293. static struct kmem_cache *qnx4_inode_cachep;
  294. static struct inode *qnx4_alloc_inode(struct super_block *sb)
  295. {
  296. struct qnx4_inode_info *ei;
  297. ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
  298. if (!ei)
  299. return NULL;
  300. return &ei->vfs_inode;
  301. }
  302. static void qnx4_i_callback(struct rcu_head *head)
  303. {
  304. struct inode *inode = container_of(head, struct inode, i_rcu);
  305. kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
  306. }
  307. static void qnx4_destroy_inode(struct inode *inode)
  308. {
  309. call_rcu(&inode->i_rcu, qnx4_i_callback);
  310. }
  311. static void init_once(void *foo)
  312. {
  313. struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
  314. inode_init_once(&ei->vfs_inode);
  315. }
  316. static int init_inodecache(void)
  317. {
  318. qnx4_inode_cachep = kmem_cache_create("qnx4_inode_cache",
  319. sizeof(struct qnx4_inode_info),
  320. 0, (SLAB_RECLAIM_ACCOUNT|
  321. SLAB_MEM_SPREAD),
  322. init_once);
  323. if (qnx4_inode_cachep == NULL)
  324. return -ENOMEM;
  325. return 0;
  326. }
  327. static void destroy_inodecache(void)
  328. {
  329. /*
  330. * Make sure all delayed rcu free inodes are flushed before we
  331. * destroy cache.
  332. */
  333. rcu_barrier();
  334. kmem_cache_destroy(qnx4_inode_cachep);
  335. }
  336. static struct dentry *qnx4_mount(struct file_system_type *fs_type,
  337. int flags, const char *dev_name, void *data)
  338. {
  339. return mount_bdev(fs_type, flags, dev_name, data, qnx4_fill_super);
  340. }
  341. static struct file_system_type qnx4_fs_type = {
  342. .owner = THIS_MODULE,
  343. .name = "qnx4",
  344. .mount = qnx4_mount,
  345. .kill_sb = qnx4_kill_sb,
  346. .fs_flags = FS_REQUIRES_DEV,
  347. };
  348. MODULE_ALIAS_FS("qnx4");
  349. static int __init init_qnx4_fs(void)
  350. {
  351. int err;
  352. err = init_inodecache();
  353. if (err)
  354. return err;
  355. err = register_filesystem(&qnx4_fs_type);
  356. if (err) {
  357. destroy_inodecache();
  358. return err;
  359. }
  360. printk(KERN_INFO "QNX4 filesystem 0.2.3 registered.\n");
  361. return 0;
  362. }
  363. static void __exit exit_qnx4_fs(void)
  364. {
  365. unregister_filesystem(&qnx4_fs_type);
  366. destroy_inodecache();
  367. }
  368. module_init(init_qnx4_fs)
  369. module_exit(exit_qnx4_fs)
  370. MODULE_LICENSE("GPL");