inode.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001
  1. /*
  2. * Compressed rom filesystem for Linux.
  3. *
  4. * Copyright (C) 1999 Linus Torvalds.
  5. *
  6. * This file is released under the GPL.
  7. */
  8. /*
  9. * These are the VFS interfaces to the compressed rom filesystem.
  10. * The actual compression is based on zlib, see the other files.
  11. */
  12. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13. #include <linux/module.h>
  14. #include <linux/fs.h>
  15. #include <linux/file.h>
  16. #include <linux/pagemap.h>
  17. #include <linux/pfn_t.h>
  18. #include <linux/ramfs.h>
  19. #include <linux/init.h>
  20. #include <linux/string.h>
  21. #include <linux/blkdev.h>
  22. #include <linux/mtd/mtd.h>
  23. #include <linux/mtd/super.h>
  24. #include <linux/slab.h>
  25. #include <linux/vfs.h>
  26. #include <linux/mutex.h>
  27. #include <uapi/linux/cramfs_fs.h>
  28. #include <linux/uaccess.h>
  29. #include "internal.h"
/*
 * cramfs super-block data in memory
 */
struct cramfs_sb_info {
	unsigned long magic;		/* filesystem magic read from the image */
	unsigned long size;		/* total image size in bytes */
	unsigned long blocks;		/* block count (v2 images only, else 0) */
	unsigned long files;		/* file count (v2 images only, else 0) */
	unsigned long flags;		/* feature flags from the superblock */
	void *linear_virt_addr;		/* direct mapping of the image (MTD), or NULL */
	resource_size_t linear_phys_addr;	/* physical address of that mapping */
	size_t mtd_point_size;		/* length currently mapped via mtd_point() */
};
/* Retrieve our private info from the generic VFS super_block. */
static inline struct cramfs_sb_info *CRAMFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct file_operations cramfs_physmem_fops;
static const struct address_space_operations cramfs_aops;

/* Serializes all image accesses: every cramfs_read() caller takes this. */
static DEFINE_MUTEX(read_mutex);

/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x)	((x)->i_ino)
  55. static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
  56. {
  57. if (!cino->offset)
  58. return offset + 1;
  59. if (!cino->size)
  60. return offset + 1;
  61. /*
  62. * The file mode test fixes buggy mkcramfs implementations where
  63. * cramfs_inode->offset is set to a non zero value for entries
  64. * which did not contain data, like devices node and fifos.
  65. */
  66. switch (cino->mode & S_IFMT) {
  67. case S_IFREG:
  68. case S_IFDIR:
  69. case S_IFLNK:
  70. return cino->offset << 2;
  71. default:
  72. break;
  73. }
  74. return offset + 1;
  75. }
/*
 * Look up (or create) the in-core inode for an on-image cramfs inode.
 *
 * @sb:           superblock of the mounted image
 * @cramfs_inode: decoded on-image inode data
 * @offset:       byte offset of that entry within the image (feeds i_ino)
 *
 * Returns the inode (possibly one already cached) or ERR_PTR(-ENOMEM).
 */
static struct inode *get_cramfs_inode(struct super_block *sb,
	const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
	struct inode *inode;
	static struct timespec64 zerotime;	/* all timestamps forced to the epoch */

	inode = iget_locked(sb, cramino(cramfs_inode, offset));
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* already initialized by an earlier lookup */

	switch (cramfs_inode->mode & S_IFMT) {
	case S_IFREG:
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
		/*
		 * With ext block pointers on a physically mapped image,
		 * regular files get the direct-mmap-capable fops.
		 */
		if (IS_ENABLED(CONFIG_CRAMFS_MTD) &&
		    CRAMFS_SB(sb)->flags & CRAMFS_FLAG_EXT_BLOCK_POINTERS &&
		    CRAMFS_SB(sb)->linear_phys_addr)
			inode->i_fop = &cramfs_physmem_fops;
		break;
	case S_IFDIR:
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_data.a_ops = &cramfs_aops;
		break;
	default:
		/* device nodes/fifos: the size field holds the device number */
		init_special_inode(inode, cramfs_inode->mode,
				old_decode_dev(cramfs_inode->size));
	}

	inode->i_mode = cramfs_inode->mode;
	i_uid_write(inode, cramfs_inode->uid);
	i_gid_write(inode, cramfs_inode->gid);

	/* if the lower 2 bits are zero, the inode contains data */
	if (!(inode->i_ino & 3)) {
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	}

	/* Struct copy intentional */
	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
	/* inode->i_nlink is left 1 - arguably wrong for directories,
	   but it's the best we can do without reading the directory
	   contents.  1 yields the right result in GNU find, even
	   without -noleaf option. */
	unlock_new_inode(inode);
	return inode;
}
/*
 * We have our own block cache: don't fill up the buffer cache
 * with the rom-image, because the way the filesystem is set
 * up the accesses should be fairly regular and cached in the
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
 * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 *
 * Note: This is all optimized away at compile time when
 *       CONFIG_CRAMFS_BLOCKDEV=n.
 */
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT	(2)
#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_SIZE)

/*
 * The cache proper: the buffer data, which (sb, starting page) range
 * each buffer currently holds, and the round-robin recycling index.
 */
static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
static struct super_block *buffer_dev[READ_BUFFERS];
static int next_buffer;
/*
 * Populate our block cache and return a pointer to it.
 *
 * Reads BLKS_PER_BUF pages starting at @offset's page from the block
 * device's page cache into one of the two static buffers, unless an
 * existing buffer already covers [offset, offset + len). Pages past
 * the device end or that failed to read are zero-filled. Callers
 * serialize via read_mutex since the buffers are global.
 */
static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
				unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_SHIFT;
	offset &= PAGE_SIZE - 1;	/* now the offset within the first page */

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT;
		blk_offset += offset;
		if (blk_offset + len > BUFFER_SIZE)
			continue;	/* request doesn't fit in this buffer */
		return read_buffers[i] + blk_offset;
	}

	devsize = mapping->host->i_size >> PAGE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page(mapping, blocknr + i, NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	/* Wait for the reads to finish and drop any page that failed. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				put_page(page);
				pages[i] = NULL;
			}
		}
	}

	/* Recycle the next buffer round-robin and record what it holds. */
	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			memcpy(data, kmap(page), PAGE_SIZE);
			kunmap(page);
			put_page(page);
		} else
			memset(data, 0, PAGE_SIZE);	/* hole or read error */
		data += PAGE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
  222. /*
  223. * Return a pointer to the linearly addressed cramfs image in memory.
  224. */
  225. static void *cramfs_direct_read(struct super_block *sb, unsigned int offset,
  226. unsigned int len)
  227. {
  228. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  229. if (!len)
  230. return NULL;
  231. if (len > sbi->size || offset > sbi->size - len)
  232. return page_address(ZERO_PAGE(0));
  233. return sbi->linear_virt_addr + offset;
  234. }
  235. /*
  236. * Returns a pointer to a buffer containing at least LEN bytes of
  237. * filesystem starting at byte offset OFFSET into the filesystem.
  238. */
  239. static void *cramfs_read(struct super_block *sb, unsigned int offset,
  240. unsigned int len)
  241. {
  242. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  243. if (IS_ENABLED(CONFIG_CRAMFS_MTD) && sbi->linear_virt_addr)
  244. return cramfs_direct_read(sb, offset, len);
  245. else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV))
  246. return cramfs_blkdev_read(sb, offset, len);
  247. else
  248. return NULL;
  249. }
/*
 * For a mapping to be possible, we need a range of uncompressed and
 * contiguous blocks. Return the offset for the first block and number of
 * valid blocks for which that is true, or zero otherwise.
 *
 * On success *pages is reduced to the count of consecutive qualifying
 * blocks found starting at @pgoff.
 */
static u32 cramfs_get_block_range(struct inode *inode, u32 pgoff, u32 *pages)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	int i;
	u32 *blockptrs, first_block_addr;

	/*
	 * We can dereference memory directly here as this code may be
	 * reached only when there is a direct filesystem image mapping
	 * available in memory.
	 */
	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode) + pgoff * 4);
	first_block_addr = blockptrs[0] & ~CRAMFS_BLK_FLAGS;
	i = 0;
	do {
		/*
		 * Each successive pointer must be exactly one PAGE_SIZE
		 * further (addresses are stored shifted right by
		 * CRAMFS_BLK_DIRECT_PTR_SHIFT) and carry both the
		 * direct and uncompressed flags.
		 */
		u32 block_off = i * (PAGE_SIZE >> CRAMFS_BLK_DIRECT_PTR_SHIFT);
		u32 expect = (first_block_addr + block_off) |
			     CRAMFS_BLK_FLAG_DIRECT_PTR |
			     CRAMFS_BLK_FLAG_UNCOMPRESSED;
		if (blockptrs[i] != expect) {
			pr_debug("range: block %d/%d got %#x expects %#x\n",
				 pgoff+i, pgoff + *pages - 1,
				 blockptrs[i], expect);
			if (i == 0)
				return 0;
			break;
		}
	} while (++i < *pages);

	*pages = i;
	return first_block_addr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
}
  285. #ifdef CONFIG_MMU
/*
 * Return true if the last page of a file in the filesystem image contains
 * some other data that doesn't belong to that file. It is assumed that the
 * last block is CRAMFS_BLK_FLAG_DIRECT_PTR | CRAMFS_BLK_FLAG_UNCOMPRESSED
 * (verified by cramfs_get_block_range() and directly accessible in memory.
 */
static bool cramfs_last_page_is_shared(struct inode *inode)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	u32 partial, last_page, blockaddr, *blockptrs;
	char *tail_data;

	/* bytes of the last page actually used; 0 means fully used */
	partial = offset_in_page(inode->i_size);
	if (!partial)
		return false;
	last_page = inode->i_size >> PAGE_SHIFT;
	blockptrs = (u32 *)(sbi->linear_virt_addr + OFFSET(inode));
	blockaddr = blockptrs[last_page] & ~CRAMFS_BLK_FLAGS;
	blockaddr <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
	tail_data = sbi->linear_virt_addr + blockaddr + partial;
	/* any non-zero byte after EOF means the tail holds foreign data */
	return memchr_inv(tail_data, 0, PAGE_SIZE - partial) ? true : false;
}
/*
 * mmap for physically mapped images: perform the normal read-only mmap
 * setup, then opportunistically pre-populate the vma's ptes with a
 * direct mapping when the file data is uncompressed, contiguous and
 * page aligned in the image. Bailing out is never an error — normal
 * paging via cramfs_readpage() always remains possible.
 */
static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct cramfs_sb_info *sbi = CRAMFS_SB(inode->i_sb);
	unsigned int pages, max_pages, offset;
	unsigned long address, pgoff = vma->vm_pgoff;
	char *bailout_reason;
	int ret;

	ret = generic_file_readonly_mmap(file, vma);
	if (ret)
		return ret;

	/*
	 * Now try to pre-populate ptes for this vma with a direct
	 * mapping avoiding memory allocation when possible.
	 */

	/* Could COW work here? */
	bailout_reason = "vma is writable";
	if (vma->vm_flags & VM_WRITE)
		goto bailout;

	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	bailout_reason = "beyond file limit";
	if (pgoff >= max_pages)
		goto bailout;
	pages = min(vma_pages(vma), max_pages - pgoff);

	offset = cramfs_get_block_range(inode, pgoff, &pages);
	bailout_reason = "unsuitable block layout";
	if (!offset)
		goto bailout;
	address = sbi->linear_phys_addr + offset;
	bailout_reason = "data is not page aligned";
	if (!PAGE_ALIGNED(address))
		goto bailout;

	/* Don't map the last page if it contains some other data */
	if (pgoff + pages == max_pages && cramfs_last_page_is_shared(inode)) {
		pr_debug("mmap: %s: last page is shared\n",
			 file_dentry(file)->d_name.name);
		pages--;
	}

	if (!pages) {
		bailout_reason = "no suitable block remaining";
		goto bailout;
	}

	if (pages == vma_pages(vma)) {
		/*
		 * The entire vma is mappable. remap_pfn_range() will
		 * make it distinguishable from a non-direct mapping
		 * in /proc/<pid>/maps by substituting the file offset
		 * with the actual physical address.
		 */
		ret = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
				      pages * PAGE_SIZE, vma->vm_page_prot);
	} else {
		/*
		 * Let's create a mixed map if we can't map it all.
		 * The normal paging machinery will take care of the
		 * unpopulated ptes via cramfs_readpage().
		 */
		int i;
		vma->vm_flags |= VM_MIXEDMAP;
		/* ret is still 0 here, so the loop starts */
		for (i = 0; i < pages && !ret; i++) {
			unsigned long off = i * PAGE_SIZE;
			pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
			ret = vm_insert_mixed(vma, vma->vm_start + off, pfn);
		}
	}

	if (!ret)
		pr_debug("mapped %s[%lu] at 0x%08lx (%u/%lu pages) "
			 "to vma 0x%08lx, page_prot 0x%llx\n",
			 file_dentry(file)->d_name.name, pgoff,
			 address, pages, vma_pages(vma), vma->vm_start,
			 (unsigned long long)pgprot_val(vma->vm_page_prot));
	return ret;

bailout:
	pr_debug("%s[%lu]: direct mmap impossible: %s\n",
		 file_dentry(file)->d_name.name, pgoff, bailout_reason);

	/* Didn't manage any direct map, but normal paging is still possible */
	return 0;
}
#else /* CONFIG_MMU */

/*
 * On !MMU, mmap is only accepted for shared mappings; the actual direct
 * placement happens through ->get_unmapped_area below.
 */
static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS;
}

/*
 * Return the physical address at which the requested file range can be
 * mapped directly, or -ENOSYS (falling back to copying) when the range
 * is not uncompressed and contiguous in the image.
 */
static unsigned long cramfs_physmem_get_unmapped_area(struct file *file,
			unsigned long addr, unsigned long len,
			unsigned long pgoff, unsigned long flags)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
	unsigned int pages, block_pages, max_pages, offset;

	pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	max_pages = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= max_pages || pages > max_pages - pgoff)
		return -EINVAL;
	block_pages = pages;
	offset = cramfs_get_block_range(inode, pgoff, &block_pages);
	if (!offset || block_pages != pages)
		return -ENOSYS;		/* must cover the whole request */
	addr = sbi->linear_phys_addr + offset;
	pr_debug("get_unmapped for %s ofs %#lx siz %lu at 0x%08lx\n",
		 file_dentry(file)->d_name.name, pgoff*PAGE_SIZE, len, addr);
	return addr;
}

/* Advertise which !MMU mapping modes we can service. */
static unsigned int cramfs_physmem_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT |
	       NOMMU_MAP_READ | NOMMU_MAP_EXEC;
}

#endif /* CONFIG_MMU */
/*
 * File operations for regular files on a physically mapped image:
 * like generic_ro_fops, but with the direct-mapping aware mmap above.
 */
static const struct file_operations cramfs_physmem_fops = {
	.llseek			= generic_file_llseek,
	.read_iter		= generic_file_read_iter,
	.splice_read		= generic_file_splice_read,
	.mmap			= cramfs_physmem_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area	= cramfs_physmem_get_unmapped_area,
	.mmap_capabilities	= cramfs_physmem_mmap_capabilities,
#endif
};
  427. static void cramfs_kill_sb(struct super_block *sb)
  428. {
  429. struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
  430. if (IS_ENABLED(CCONFIG_CRAMFS_MTD) && sb->s_mtd) {
  431. if (sbi && sbi->mtd_point_size)
  432. mtd_unpoint(sb->s_mtd, 0, sbi->mtd_point_size);
  433. kill_mtd_super(sb);
  434. } else if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV) && sb->s_bdev) {
  435. kill_block_super(sb);
  436. }
  437. kfree(sbi);
  438. }
/*
 * Remount: cramfs is immutable, so flush anything pending at the VFS
 * level and force the mount to stay read-only.
 */
static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= SB_RDONLY;	/* never allow a rw remount */
	return 0;
}
/*
 * Read and validate the cramfs superblock.
 *
 * The superblock may sit at offset 0 or at offset 512 (leaving room
 * for a boot block). On success @super is filled in and the in-core
 * sb info (size, blocks, files, flags, magic) is initialized.
 * Returns 0, or -EINVAL for a malformed/foreign image.
 */
static int cramfs_read_super(struct super_block *sb,
			     struct cramfs_super *super, int silent)
{
	struct cramfs_sb_info *sbi = CRAMFS_SB(sb);
	unsigned long root_offset;

	/* We don't know the real size yet */
	sbi->size = PAGE_SIZE;

	/* Read the first block and get the superblock from it */
	mutex_lock(&read_mutex);
	memcpy(super, cramfs_read(sb, 0, sizeof(*super)), sizeof(*super));
	mutex_unlock(&read_mutex);

	/* Do sanity checks on the superblock */
	if (super->magic != CRAMFS_MAGIC) {
		/* check for wrong endianness */
		if (super->magic == CRAMFS_MAGIC_WEND) {
			if (!silent)
				pr_err("wrong endianness\n");
			return -EINVAL;
		}

		/* check at 512 byte offset */
		mutex_lock(&read_mutex);
		memcpy(super,
		       cramfs_read(sb, 512, sizeof(*super)),
		       sizeof(*super));
		mutex_unlock(&read_mutex);
		if (super->magic != CRAMFS_MAGIC) {
			if (super->magic == CRAMFS_MAGIC_WEND && !silent)
				pr_err("wrong endianness\n");
			else if (!silent)
				pr_err("wrong magic\n");
			return -EINVAL;
		}
	}

	/* get feature flags first */
	if (super->flags & ~CRAMFS_SUPPORTED_FLAGS) {
		pr_err("unsupported filesystem features\n");
		return -EINVAL;
	}

	/* Check that the root inode is in a sane state */
	if (!S_ISDIR(super->root.mode)) {
		pr_err("root is not a directory\n");
		return -EINVAL;
	}

	/* correct strange, hard-coded permissions of mkcramfs */
	super->root.mode |= 0555;

	root_offset = super->root.offset << 2;
	if (super->flags & CRAMFS_FLAG_FSID_VERSION_2) {
		sbi->size = super->size;
		sbi->blocks = super->fsid.blocks;
		sbi->files = super->fsid.files;
	} else {
		/* pre-v2 images don't record their size: use 1<<28 (256 MB) */
		sbi->size = 1<<28;
		sbi->blocks = 0;
		sbi->files = 0;
	}
	sbi->magic = super->magic;
	sbi->flags = super->flags;
	if (root_offset == 0)
		pr_info("empty filesystem");
	else if (!(super->flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
		 ((root_offset != sizeof(struct cramfs_super)) &&
		  (root_offset != 512 + sizeof(struct cramfs_super))))
	{
		pr_err("bad root offset %lu\n", root_offset);
		return -EINVAL;
	}

	return 0;
}
/*
 * Common mount tail: install our operations, force read-only and
 * instantiate the root dentry from the on-image root inode.
 */
static int cramfs_finalize_super(struct super_block *sb,
				 struct cramfs_inode *cramfs_root)
{
	struct inode *root;

	/* Set it all up.. */
	sb->s_flags |= SB_RDONLY;
	sb->s_op = &cramfs_ops;
	root = get_cramfs_inode(sb, cramfs_root, 0);
	if (IS_ERR(root))
		return PTR_ERR(root);
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}
/*
 * Fill a superblock from a block device image. On failure the sbi
 * stays attached to sb->s_fs_info and is freed by cramfs_kill_sb().
 */
static int cramfs_blkdev_fill_super(struct super_block *sb, void *data,
				    int silent)
{
	struct cramfs_sb_info *sbi;
	struct cramfs_super super;
	int i, err;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Invalidate the read buffers on mount: think disk change.. */
	for (i = 0; i < READ_BUFFERS; i++)
		buffer_blocknr[i] = -1;

	err = cramfs_read_super(sb, &super, silent);
	if (err)
		return err;
	return cramfs_finalize_super(sb, &super.root);
}
/*
 * Fill a superblock from a directly mappable MTD device: mtd_point()
 * one page first to read the superblock, then remap the full image
 * once its size is known. Cleanup on failure is done by
 * cramfs_kill_sb() via mtd_point_size/s_fs_info.
 */
static int cramfs_mtd_fill_super(struct super_block *sb, void *data,
				 int silent)
{
	struct cramfs_sb_info *sbi;
	struct cramfs_super super;
	int err;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Map only one page for now. Will remap it when fs size is known. */
	err = mtd_point(sb->s_mtd, 0, PAGE_SIZE, &sbi->mtd_point_size,
			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
	if (err || sbi->mtd_point_size != PAGE_SIZE) {
		pr_err("unable to get direct memory access to mtd:%s\n",
		       sb->s_mtd->name);
		return err ? : -ENODATA;
	}

	pr_info("checking physical address %pap for linear cramfs image\n",
		&sbi->linear_phys_addr);

	err = cramfs_read_super(sb, &super, silent);
	if (err)
		return err;

	/* Remap the whole filesystem now */
	pr_info("linear cramfs image on mtd:%s appears to be %lu KB in size\n",
		sb->s_mtd->name, sbi->size/1024);
	mtd_unpoint(sb->s_mtd, 0, PAGE_SIZE);
	err = mtd_point(sb->s_mtd, 0, sbi->size, &sbi->mtd_point_size,
			&sbi->linear_virt_addr, &sbi->linear_phys_addr);
	if (err || sbi->mtd_point_size != sbi->size) {
		pr_err("unable to get direct memory access to mtd:%s\n",
		       sb->s_mtd->name);
		return err ? : -ENODATA;
	}

	return cramfs_finalize_super(sb, &super.root);
}
  582. static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
  583. {
  584. struct super_block *sb = dentry->d_sb;
  585. u64 id = 0;
  586. if (sb->s_bdev)
  587. id = huge_encode_dev(sb->s_bdev->bd_dev);
  588. else if (sb->s_dev)
  589. id = huge_encode_dev(sb->s_dev);
  590. buf->f_type = CRAMFS_MAGIC;
  591. buf->f_bsize = PAGE_SIZE;
  592. buf->f_blocks = CRAMFS_SB(sb)->blocks;
  593. buf->f_bfree = 0;
  594. buf->f_bavail = 0;
  595. buf->f_files = CRAMFS_SB(sb)->files;
  596. buf->f_ffree = 0;
  597. buf->f_fsid.val[0] = (u32)id;
  598. buf->f_fsid.val[1] = (u32)(id >> 32);
  599. buf->f_namelen = CRAMFS_MAXPATHLEN;
  600. return 0;
  601. }
/*
 * Read a cramfs directory entry.
 *
 * Entries are packed cramfs_inode structures immediately followed by
 * the name, NUL-padded to a 4-byte boundary. Each name is copied out
 * while holding read_mutex, since the buffer cramfs_read() returns is
 * shared and may be recycled.
 */
static int cramfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	char *buf;
	unsigned int offset;

	/* Offset within the thing. */
	if (ctx->pos >= inode->i_size)
		return 0;
	offset = ctx->pos;
	/* Directory entries are always 4-byte aligned */
	if (offset & 3)
		return -EINVAL;

	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (offset < inode->i_size) {
		struct cramfs_inode *de;
		unsigned long nextoffset;
		char *name;
		ino_t ino;
		umode_t mode;
		int namelen;

		mutex_lock(&read_mutex);
		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/*
		 * Namelengths on disk are shifted by two
		 * and the name padded out to 4-byte boundaries
		 * with zeroes.
		 */
		namelen = de->namelen << 2;
		memcpy(buf, name, namelen);
		ino = cramino(de, OFFSET(inode) + offset);
		mode = de->mode;
		mutex_unlock(&read_mutex);

		nextoffset = offset + sizeof(*de) + namelen;
		/* strip trailing NUL padding; an all-NUL name is corruption */
		for (;;) {
			if (!namelen) {
				kfree(buf);
				return -EIO;
			}
			if (buf[namelen-1])
				break;
			namelen--;
		}
		if (!dir_emit(ctx, buf, namelen, ino, mode >> 12))
			break;

		ctx->pos = offset = nextoffset;
	}
	kfree(buf);
	return 0;
}
/*
 * Lookup and fill in the inode data..
 *
 * Linear scan of the directory under read_mutex. When the image was
 * built with sorted directories we can stop as soon as an entry's
 * first character exceeds the one we are looking for.
 */
static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	unsigned int offset = 0;
	struct inode *inode = NULL;
	int sorted;

	mutex_lock(&read_mutex);
	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
	while (offset < dir->i_size) {
		struct cramfs_inode *de;
		char *name;
		int namelen, retval;
		int dir_off = OFFSET(dir) + offset;

		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/* Try to take advantage of sorted directories */
		if (sorted && (dentry->d_name.name[0] < name[0]))
			break;

		namelen = de->namelen << 2;
		offset += sizeof(*de) + namelen;

		/* Quick check that the name is roughly the right length */
		if (((dentry->d_name.len + 3) & ~3) != namelen)
			continue;

		/* strip trailing NUL padding; an all-NUL name is corruption */
		for (;;) {
			if (!namelen) {
				inode = ERR_PTR(-EIO);
				goto out;
			}
			if (name[namelen-1])
				break;
			namelen--;
		}
		if (namelen != dentry->d_name.len)
			continue;
		retval = memcmp(dentry->d_name.name, name, namelen);
		if (retval > 0)
			continue;
		if (!retval) {
			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
			break;
		}
		/* else (retval < 0) */
		if (sorted)
			break;
	}
out:
	mutex_unlock(&read_mutex);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	d_add(dentry, inode);	/* a NULL inode makes a negative dentry */
	return NULL;
}
/*
 * Fill one page cache page with file data.
 *
 * Each file has a table of u32 block pointers at OFFSET(inode), one
 * per PAGE_SIZE of data. A pointer either marks one past the end of a
 * compressed block (the classic layout) or, when
 * CRAMFS_BLK_FLAG_DIRECT_PTR is set, points straight at the block,
 * which may additionally be uncompressed. Bytes the block doesn't
 * produce are zero-filled. Decode failures set PG_error on the page
 * rather than returning an error.
 */
static int cramfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;

	maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap(page);

	if (page->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + page->index * 4;
		u32 block_ptr, block_start, block_len;
		bool uncompressed, direct;

		mutex_lock(&read_mutex);
		block_ptr = *(u32 *) cramfs_read(sb, blkptr_offset, 4);
		uncompressed = (block_ptr & CRAMFS_BLK_FLAG_UNCOMPRESSED);
		direct = (block_ptr & CRAMFS_BLK_FLAG_DIRECT_PTR);
		block_ptr &= ~CRAMFS_BLK_FLAGS;

		if (direct) {
			/*
			 * The block pointer is an absolute start pointer,
			 * shifted by 2 bits. The size is included in the
			 * first 2 bytes of the data block when compressed,
			 * or PAGE_SIZE otherwise.
			 */
			block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;
			if (uncompressed) {
				block_len = PAGE_SIZE;
				/* if last block: cap to file length */
				if (page->index == maxblock - 1)
					block_len =
						offset_in_page(inode->i_size);
			} else {
				block_len = *(u16 *)
					cramfs_read(sb, block_start, 2);
				block_start += 2;
			}
		} else {
			/*
			 * The block pointer indicates one past the end of
			 * the current block (start of next block). If this
			 * is the first block then it starts where the block
			 * pointer table ends, otherwise its start comes
			 * from the previous block's pointer.
			 */
			block_start = OFFSET(inode) + maxblock * 4;
			if (page->index)
				block_start = *(u32 *)
					cramfs_read(sb, blkptr_offset - 4, 4);
			/* Beware... previous ptr might be a direct ptr */
			if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {
				/* See comments on earlier code. */
				u32 prev_start = block_start;
				block_start = prev_start & ~CRAMFS_BLK_FLAGS;
				block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;
				if (prev_start & CRAMFS_BLK_FLAG_UNCOMPRESSED) {
					block_start += PAGE_SIZE;
				} else {
					block_len = *(u16 *)
						cramfs_read(sb, block_start, 2);
					block_start += 2 + block_len;
				}
			}
			block_start &= ~CRAMFS_BLK_FLAGS;
			block_len = block_ptr - block_start;
		}

		if (block_len == 0)
			; /* hole */
		else if (unlikely(block_len > 2*PAGE_SIZE ||
				  (uncompressed && block_len > PAGE_SIZE))) {
			mutex_unlock(&read_mutex);
			pr_err("bad data blocksize %u\n", block_len);
			goto err;
		} else if (uncompressed) {
			memcpy(pgdata,
			       cramfs_read(sb, block_start, block_len),
			       block_len);
			bytes_filled = block_len;
		} else {
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_SIZE,
				 cramfs_read(sb, block_start, block_len),
				 block_len);
		}
		mutex_unlock(&read_mutex);
		if (unlikely(bytes_filled < 0))
			goto err;
	}

	/* zero-fill whatever the block didn't cover */
	memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled);
	flush_dcache_page(page);
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

err:
	kunmap(page);
	ClearPageUptodate(page);
	SetPageError(page);
	unlock_page(page);
	return 0;
}
static const struct address_space_operations cramfs_aops = {
	.readpage = cramfs_readpage
};

/*
 * Our operations:
 */

/*
 * A directory can only readdir
 */
static const struct file_operations cramfs_directory_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= cramfs_readdir,
};

static const struct inode_operations cramfs_dir_inode_operations = {
	.lookup		= cramfs_lookup,
};

static const struct super_operations cramfs_ops = {
	.remount_fs	= cramfs_remount,
	.statfs		= cramfs_statfs,
};
  835. static struct dentry *cramfs_mount(struct file_system_type *fs_type, int flags,
  836. const char *dev_name, void *data)
  837. {
  838. struct dentry *ret = ERR_PTR(-ENOPROTOOPT);
  839. if (IS_ENABLED(CONFIG_CRAMFS_MTD)) {
  840. ret = mount_mtd(fs_type, flags, dev_name, data,
  841. cramfs_mtd_fill_super);
  842. if (!IS_ERR(ret))
  843. return ret;
  844. }
  845. if (IS_ENABLED(CONFIG_CRAMFS_BLOCKDEV)) {
  846. ret = mount_bdev(fs_type, flags, dev_name, data,
  847. cramfs_blkdev_fill_super);
  848. }
  849. return ret;
  850. }
/* Filesystem registration: cramfs always needs a backing device. */
static struct file_system_type cramfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "cramfs",
	.mount		= cramfs_mount,
	.kill_sb	= cramfs_kill_sb,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("cramfs");
  859. static int __init init_cramfs_fs(void)
  860. {
  861. int rv;
  862. rv = cramfs_uncompress_init();
  863. if (rv < 0)
  864. return rv;
  865. rv = register_filesystem(&cramfs_fs_type);
  866. if (rv < 0)
  867. cramfs_uncompress_exit();
  868. return rv;
  869. }
/* Module exit: tear down the decompressor and unregister. */
static void __exit exit_cramfs_fs(void)
{
	cramfs_uncompress_exit();
	unregister_filesystem(&cramfs_fs_type);
}

module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
MODULE_LICENSE("GPL");