/* fs/affs/file.c */
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */
#include <linux/uio.h>
#include "affs.h"

static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
  18. static int
  19. affs_file_open(struct inode *inode, struct file *filp)
  20. {
  21. pr_debug("open(%lu,%d)\n",
  22. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  23. atomic_inc(&AFFS_I(inode)->i_opencnt);
  24. return 0;
  25. }
  26. static int
  27. affs_file_release(struct inode *inode, struct file *filp)
  28. {
  29. pr_debug("release(%lu, %d)\n",
  30. inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
  31. if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
  32. inode_lock(inode);
  33. if (inode->i_size != AFFS_I(inode)->mmu_private)
  34. affs_truncate(inode);
  35. affs_free_prealloc(inode);
  36. inode_unlock(inode);
  37. }
  38. return 0;
  39. }
/*
 * Grow the per-inode extended-block caches so that linear-cache slot
 * @lc_idx becomes valid.
 *
 * Each file shares one zeroed page between two caches: a "linear" cache
 * (i_lc) holding the key of every (1 << i_lc_shift)'th extended block,
 * and an associative cache (i_ac) of recently visited extended blocks.
 * When the extended-block count outgrows the linear cache, the cache is
 * compacted (only every off'th entry survives) and i_lc_shift grows.
 *
 * Returns 0 on success, -ENOMEM if the cache page cannot be allocated,
 * or -EIO if an extended block cannot be read while walking the chain.
 */
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	/* Lazily allocate the shared cache page on first use. */
	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		/* NOTE(review): this compaction copies i_ac entries although
		 * the surrounding code is resizing the linear cache (i_lc);
		 * looks like it may have been meant to copy i_lc — confirm
		 * against the filesystem's history before changing. */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			/* slot 0 is the file header block itself */
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		/* walk (i_lc_mask + 1) links from the previous cached key */
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
/*
 * Allocate and initialise a new extended (T_LIST) block for @inode and
 * chain it behind @bh, the current last extended block; @ext is the
 * index the new block will occupy (used only for the extension count).
 *
 * The new block is fully initialised, checksummed and marked dirty
 * before the previous block is linked to it, so an interrupted write
 * never leaves a link to an uninitialised block.
 *
 * Returns the new buffer_head or ERR_PTR(-ENOSPC/-EIO).
 */
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		/* give the block back so it is not leaked */
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);
	mark_buffer_dirty_inode(new_bh, inode);

	/* link the previous extended block to the new one */
	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	/* incremental checksum update instead of a full recompute */
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}
  129. static inline struct buffer_head *
  130. affs_get_extblock(struct inode *inode, u32 ext)
  131. {
  132. /* inline the simplest case: same extended block as last time */
  133. struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
  134. if (ext == AFFS_I(inode)->i_ext_last)
  135. get_bh(bh);
  136. else
  137. /* we have to do more (not inlined) */
  138. bh = affs_get_extblock_slow(inode, ext);
  139. return bh;
  140. }
/*
 * Slow path of affs_get_extblock(): locate (or allocate) extended block
 * number @ext of @inode and return its buffer.
 *
 * Lookup strategy, in order of preference:
 *   1. the block following the one cached in i_ext_bh,
 *   2. the file header block itself (ext == 0),
 *   3. allocation of a brand-new extended block (ext == i_extcnt),
 *   4. the linear cache (exact hit on every n'th block),
 *   5. the associative cache (exact or nearby hit),
 *   6. chain-walking from the nearest known block.
 * The result is stored back into i_ext_bh/i_ext_last for the fast path.
 *
 * Returns a referenced buffer_head or ERR_PTR on failure.
 */
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}
/*
 * Map logical file block @block of @inode to a device block in
 * @bh_result. With @create set, a new block may be allocated — but only
 * when @block is exactly one past the current end of file (AFFS appends
 * a single block at a time through this path).
 *
 * Returns 0 on success, -EIO for an out-of-range request, -ENOSPC if
 * allocation fails, or the error from the extended-block lookup.
 */
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
		 (unsigned long long)block);
	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		/* only appending exactly one new block is supported */
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;	/* block already exists: plain lookup */

	//lock cache
	affs_lock_ext(inode);

	/* split into (extended block index, slot within its hash table) */
	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block",
				     "block already set (%llx)",
				     (unsigned long long)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		/* +1 accounts for the block_count increment above */
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %llu",
		   (unsigned long long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}
  315. static int affs_writepage(struct page *page, struct writeback_control *wbc)
  316. {
  317. return block_write_full_page(page, affs_get_block, wbc);
  318. }
  319. static int affs_readpage(struct file *file, struct page *page)
  320. {
  321. return block_read_full_page(page, affs_get_block);
  322. }
  323. static void affs_write_failed(struct address_space *mapping, loff_t to)
  324. {
  325. struct inode *inode = mapping->host;
  326. if (to > inode->i_size) {
  327. truncate_pagecache(inode, inode->i_size);
  328. affs_truncate(inode);
  329. }
  330. }
  331. static ssize_t
  332. affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
  333. {
  334. struct file *file = iocb->ki_filp;
  335. struct address_space *mapping = file->f_mapping;
  336. struct inode *inode = mapping->host;
  337. size_t count = iov_iter_count(iter);
  338. loff_t offset = iocb->ki_pos;
  339. ssize_t ret;
  340. if (iov_iter_rw(iter) == WRITE) {
  341. loff_t size = offset + count;
  342. if (AFFS_I(inode)->mmu_private < size)
  343. return 0;
  344. }
  345. ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
  346. if (ret < 0 && iov_iter_rw(iter) == WRITE)
  347. affs_write_failed(mapping, offset + count);
  348. return ret;
  349. }
  350. static int affs_write_begin(struct file *file, struct address_space *mapping,
  351. loff_t pos, unsigned len, unsigned flags,
  352. struct page **pagep, void **fsdata)
  353. {
  354. int ret;
  355. *pagep = NULL;
  356. ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
  357. affs_get_block,
  358. &AFFS_I(mapping->host)->mmu_private);
  359. if (unlikely(ret))
  360. affs_write_failed(mapping, pos + len);
  361. return ret;
  362. }
  363. static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
  364. {
  365. return generic_block_bmap(mapping,block,affs_get_block);
  366. }
/*
 * Address-space operations for regular FFS files. FFS data blocks hold
 * raw file data only, so the generic buffer-head helpers apply.
 */
const struct address_space_operations affs_aops = {
	.readpage = affs_readpage,
	.writepage = affs_writepage,
	.write_begin = affs_write_begin,
	.write_end = generic_write_end,
	.direct_IO = affs_direct_IO,
	.bmap = _affs_bmap
};
  375. static inline struct buffer_head *
  376. affs_bread_ino(struct inode *inode, int block, int create)
  377. {
  378. struct buffer_head *bh, tmp_bh;
  379. int err;
  380. tmp_bh.b_state = 0;
  381. err = affs_get_block(inode, block, &tmp_bh, create);
  382. if (!err) {
  383. bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
  384. if (bh) {
  385. bh->b_state |= tmp_bh.b_state;
  386. return bh;
  387. }
  388. err = -EIO;
  389. }
  390. return ERR_PTR(err);
  391. }
  392. static inline struct buffer_head *
  393. affs_getzeroblk_ino(struct inode *inode, int block)
  394. {
  395. struct buffer_head *bh, tmp_bh;
  396. int err;
  397. tmp_bh.b_state = 0;
  398. err = affs_get_block(inode, block, &tmp_bh, 1);
  399. if (!err) {
  400. bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
  401. if (bh) {
  402. bh->b_state |= tmp_bh.b_state;
  403. return bh;
  404. }
  405. err = -EIO;
  406. }
  407. return ERR_PTR(err);
  408. }
  409. static inline struct buffer_head *
  410. affs_getemptyblk_ino(struct inode *inode, int block)
  411. {
  412. struct buffer_head *bh, tmp_bh;
  413. int err;
  414. tmp_bh.b_state = 0;
  415. err = affs_get_block(inode, block, &tmp_bh, 1);
  416. if (!err) {
  417. bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
  418. if (bh) {
  419. bh->b_state |= tmp_bh.b_state;
  420. return bh;
  421. }
  422. err = -EIO;
  423. }
  424. return ERR_PTR(err);
  425. }
/*
 * Fill the first @to bytes of @page from the inode's OFS data blocks.
 * With @create set, missing blocks are allocated on the way (used by
 * write_begin to make the page fully backed before a partial write).
 *
 * NOTE(review): the byte offset (page->index << PAGE_SHIFT) is held in
 * a u32, which would truncate beyond 4 GiB; this assumes AFFS file
 * sizes stay below that — confirm against the format's limits.
 *
 * Returns 0 on success or a negative errno from the block lookup.
 */
static int
affs_do_readpage_ofs(struct page *page, unsigned to, int create)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	char *data;
	unsigned pos = 0;
	u32 bidx, boff, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
		 page->index, to);
	BUG_ON(to > PAGE_SIZE);
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = page->index << PAGE_SHIFT;
	bidx = tmp / bsize;	/* first data block covering this page */
	boff = tmp % bsize;	/* offset of the page start inside it */

	while (pos < to) {
		bh = affs_bread_ino(inode, bidx, create);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		/* copy what remains of this block, capped at the request */
		tmp = min(bsize - boff, to - pos);
		BUG_ON(pos + tmp > to || tmp > bsize);
		data = kmap_atomic(page);
		memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
		kunmap_atomic(data);
		affs_brelse(bh);
		bidx++;
		pos += tmp;
		boff = 0;	/* later blocks are read from their start */
	}
	flush_dcache_page(page);
	return 0;
}
/*
 * Extend an OFS file to @newsize bytes by zero-filling: top up the last
 * partial data block, then append fully initialised zero blocks, fixing
 * each block's header (type, key, sequence, size, next) and checksum.
 *
 * NOTE(review): on allocation failure the function still sets i_size
 * and mmu_private to @newsize before returning the error — this leaves
 * the sizes ahead of what was actually allocated; confirm whether the
 * caller's rollback path (affs_write_failed/affs_truncate) relies on
 * that, before changing it.
 *
 * Returns 0 on success or a negative errno.
 */
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;

	if (boff) {
		/* zero-pad the tail of the last partial block */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		/* need the previous block so its ->next can be linked */
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		/* initialise the OFS per-block header */
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

			if (tmp_next)
				affs_warning(sb, "extent_file_ofs",
					     "next block already set for %d (%d)",
					     bidx, tmp_next);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}
/*
 * ->readpage for OFS files: fill the page up to EOF through
 * affs_do_readpage_ofs() and zero any tail beyond the file size.
 */
static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
	to = PAGE_SIZE;
	if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
		/* last page: read only up to EOF, clear the remainder */
		to = inode->i_size & ~PAGE_MASK;
		memset(page_address(page) + to, 0, PAGE_SIZE - to);
	}

	err = affs_do_readpage_ofs(page, to, 0);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}
/*
 * ->write_begin for OFS files: extend the file with zero blocks up to
 * @pos if the write starts past the allocated area, then pin the target
 * page and make it fully backed by data blocks so write_end can copy
 * into them unconditionally.
 *
 * Returns 0 with *pagep locked, or a negative errno.
 */
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(page, PAGE_SIZE, 1);
	if (err) {
		unlock_page(page);
		put_page(page);
	}
	return err;
}
/*
 * ->write_end for OFS files: copy the written range out of @page into
 * the per-block OFS data blocks, initialising headers for newly
 * allocated blocks and linking each block's ->next chain, then update
 * i_size/mmu_private if the file grew.
 *
 * Three phases: the leading partial block, whole blocks, and a trailing
 * partial block. Returns the number of bytes written, or a negative
 * errno if nothing could be written.
 */
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_SIZE - 1);
	to = from + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the page should always be uptodate here,
	 * due to write_begin.
	 */
	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	tmp = (page->index << PAGE_SHIFT) + from;
	bidx = tmp / bsize;	/* first data block touched by the write */
	boff = tmp % bsize;	/* offset of the write inside that block */
	if (boff) {
		/* leading partial block: merge into existing data */
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		/* keep the previous block around so ->next can be linked */
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
	}

	/* whole blocks: overwrite completely, no need to read old data */
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto err_bh;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			/* freshly allocated: set up the OFS header */
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}

	/* trailing partial block */
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto err_bh;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			/* existing block grew within its data area */
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	tmp = (page->index << PAGE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

err_first_bh:
	unlock_page(page);
	put_page(page);

	return written;

err_bh:
	/* bh holds an ERR_PTR here; report it only if nothing was written */
	bh = prev_bh;
	if (!written)
		written = PTR_ERR(bh);
	goto done;
}
/*
 * Address-space operations for OFS files. Every OFS data block carries
 * its own header (type/key/sequence/size/next), so the generic
 * buffer-head helpers cannot be used; page I/O is done by hand above.
 */
const struct address_space_operations affs_aops_ofs = {
	.readpage = affs_readpage_ofs,
	//.writepage = affs_writepage_ofs,
	.write_begin = affs_write_begin_ofs,
	.write_end = affs_write_end_ofs
};
  714. /* Free any preallocated blocks. */
  715. void
  716. affs_free_prealloc(struct inode *inode)
  717. {
  718. struct super_block *sb = inode->i_sb;
  719. pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);
  720. while (AFFS_I(inode)->i_pa_cnt) {
  721. AFFS_I(inode)->i_pa_cnt--;
  722. affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
  723. }
  724. }
/*
 * Truncate (or enlarge) a file to the requested i_size.
 *
 * Enlarging is delegated to the address-space write_begin/write_end
 * pair, which zero-fills up to the new size. Shrinking trims the last
 * surviving extended block in place, invalidates the extent caches past
 * the new end, then walks and frees the remaining extended-block chain
 * and all data blocks it references.
 */
void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		/* index of the last surviving data block and its ext block */
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		/* growing: let write_begin/write_end extend the file */
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t isize = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate",
			     "unexpected read error for ext block %u (%ld)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		/* keep slots up to and including the last surviving block */
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	/* free the trailing slots of the last surviving extended block */
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
			/* OFS: sever the ->next link of the last data block */
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate",
					     "unexpected read error for last block %u (%ld)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	/* free the rest of the extended-block chain and its data blocks */
	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}
  836. int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
  837. {
  838. struct inode *inode = filp->f_mapping->host;
  839. int ret, err;
  840. err = file_write_and_wait_range(filp, start, end);
  841. if (err)
  842. return err;
  843. inode_lock(inode);
  844. ret = write_inode_now(inode, 0);
  845. err = sync_blockdev(inode->i_sb->s_bdev);
  846. if (!ret)
  847. ret = err;
  848. inode_unlock(inode);
  849. return ret;
  850. }
/*
 * File operations for AFFS regular files: generic VFS helpers plus the
 * open/release hooks that maintain i_opencnt (see affs_file_open and
 * affs_file_release above).
 */
const struct file_operations affs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.mmap = generic_file_mmap,
	.open = affs_file_open,
	.release = affs_file_release,
	.fsync = affs_file_fsync,
	.splice_read = generic_file_splice_read,
};
/* Inode operations for regular files; only attribute changes need a
 * filesystem-specific hook. */
const struct inode_operations affs_file_inode_operations = {
	.setattr = affs_notify_change,
};