/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include "affs.h"

#if PAGE_SIZE < 4096
#error PAGE_SIZE must be at least 4096
#endif

static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);

const struct file_operations affs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = new_sync_read,
        .read_iter      = generic_file_read_iter,
        .write          = new_sync_write,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = affs_file_open,
        .release        = affs_file_release,
        .fsync          = affs_file_fsync,
        .splice_read    = generic_file_splice_read,
};

const struct inode_operations affs_file_inode_operations = {
        .setattr        = affs_notify_change,
};

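/*
 * AFFS keeps a per-inode open count.  When the last opener closes the
 * file, affs_file_release() below trims the file back to i_size (in
 * case preallocated blocks extended it) and returns the remaining
 * preallocation to the free list.
 */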
static int
affs_file_open(struct inode *inode, struct file *filp)
{
        pr_debug("open(%lu,%d)\n",
                 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
        atomic_inc(&AFFS_I(inode)->i_opencnt);
        return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
        pr_debug("release(%lu, %d)\n",
                 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

        if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
                mutex_lock(&inode->i_mutex);
                if (inode->i_size != AFFS_I(inode)->mmu_private)
                        affs_truncate(inode);
                affs_free_prealloc(inode);
                mutex_unlock(&inode->i_mutex);
        }
        return 0;
}

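/*
 * Grow the per-inode cache of extension block keys so that it covers
 * index lc_idx.  One zeroed page holds two halves: a "linear" cache
 * (i_lc) recording every 2^i_lc_shift'th extension key, and an
 * associative cache (i_ac) of recently used (ext, key) pairs.  When
 * the file has more extension blocks than the linear cache can index
 * at its current granularity, the cache is first compacted to a
 * coarser shift, then filled up to the requested index by walking the
 * on-disk extension chain.
 */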
static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        u32 lc_max;
        int i, j, key;

        if (!AFFS_I(inode)->i_lc) {
                char *ptr = (char *)get_zeroed_page(GFP_NOFS);
                if (!ptr)
                        return -ENOMEM;
                AFFS_I(inode)->i_lc = (u32 *)ptr;
                AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
        }

        lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

        if (AFFS_I(inode)->i_extcnt > lc_max) {
                u32 lc_shift, lc_mask, tmp, off;

                /* need to recalculate linear cache, start from old size */
                lc_shift = AFFS_I(inode)->i_lc_shift;
                tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
                for (; tmp; tmp >>= 1)
                        lc_shift++;
                lc_mask = (1 << lc_shift) - 1;

                /* fix idx and old size to new shift */
                lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
                AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

                /* first shrink old cache to make more space */
                off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
                for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
                        AFFS_I(inode)->i_lc[i] = AFFS_I(inode)->i_lc[j];

                AFFS_I(inode)->i_lc_shift = lc_shift;
                AFFS_I(inode)->i_lc_mask = lc_mask;
        }

        /* fill cache to the needed index */
        i = AFFS_I(inode)->i_lc_size;
        AFFS_I(inode)->i_lc_size = lc_idx + 1;
        for (; i <= lc_idx; i++) {
                if (!i) {
                        AFFS_I(inode)->i_lc[0] = inode->i_ino;
                        continue;
                }
                key = AFFS_I(inode)->i_lc[i - 1];
                j = AFFS_I(inode)->i_lc_mask + 1;
                /* unlock cache */
                for (; j > 0; j--) {
                        bh = affs_bread(sb, key);
                        if (!bh)
                                goto err;
                        key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                        affs_brelse(bh);
                }
                /* lock cache */
                AFFS_I(inode)->i_lc[i] = key;
        }

        return 0;

err:
        /* lock cache */
        return -EIO;
}

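/*
 * Allocate and initialize a new extension block (ptype T_LIST, stype
 * ST_FILE), chain it behind *bh via the tail extension pointer, and
 * return the new buffer_head.  The checksums of both blocks are kept
 * valid and the inode's extension count is bumped.
 */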
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *new_bh;
        u32 blocknr, tmp;

        blocknr = affs_alloc_block(inode, bh->b_blocknr);
        if (!blocknr)
                return ERR_PTR(-ENOSPC);

        new_bh = affs_getzeroblk(sb, blocknr);
        if (!new_bh) {
                affs_free_block(sb, blocknr);
                return ERR_PTR(-EIO);
        }

        AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
        AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
        AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
        AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
        affs_fix_checksum(sb, new_bh);
        mark_buffer_dirty_inode(new_bh, inode);

        tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
        if (tmp)
                affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
        AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
        affs_adjust_checksum(bh, blocknr - tmp);
        mark_buffer_dirty_inode(bh, inode);

        AFFS_I(inode)->i_extcnt++;
        mark_inode_dirty(inode);

        return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
        /* inline the simplest case: same extended block as last time */
        struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
        if (ext == AFFS_I(inode)->i_ext_last)
                get_bh(bh);
        else
                /* we have to do more (not inlined) */
                bh = affs_get_extblock_slow(inode, ext);

        return bh;
}

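/*
 * Slow path of affs_get_extblock():  find extension block number
 * "ext", allocating it when it lies exactly one past the current end
 * of the chain.  Lookup order: follow the pointer from the currently
 * cached extension block, consult the linear and associative caches,
 * and as a last resort walk the on-disk chain forward from the
 * nearest known key.
 */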
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        u32 ext_key;
        u32 lc_idx, lc_off, ac_idx;
        u32 tmp, idx;

        if (ext == AFFS_I(inode)->i_ext_last + 1) {
                /* read the next extended block from the current one */
                bh = AFFS_I(inode)->i_ext_bh;
                ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                if (ext < AFFS_I(inode)->i_extcnt)
                        goto read_ext;
                BUG_ON(ext > AFFS_I(inode)->i_extcnt);
                bh = affs_alloc_extblock(inode, bh, ext);
                if (IS_ERR(bh))
                        return bh;
                goto store_ext;
        }

        if (ext == 0) {
                /* we seek back to the file header block */
                ext_key = inode->i_ino;
                goto read_ext;
        }

        if (ext >= AFFS_I(inode)->i_extcnt) {
                struct buffer_head *prev_bh;

                /* allocate a new extended block */
                BUG_ON(ext > AFFS_I(inode)->i_extcnt);

                /* get previous extended block */
                prev_bh = affs_get_extblock(inode, ext - 1);
                if (IS_ERR(prev_bh))
                        return prev_bh;
                bh = affs_alloc_extblock(inode, prev_bh, ext);
                affs_brelse(prev_bh);
                if (IS_ERR(bh))
                        return bh;
                goto store_ext;
        }

again:
        /* check if there is an extended cache and whether it's large enough */
        lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
        lc_off = ext & AFFS_I(inode)->i_lc_mask;

        if (lc_idx >= AFFS_I(inode)->i_lc_size) {
                int err;

                err = affs_grow_extcache(inode, lc_idx);
                if (err)
                        return ERR_PTR(err);
                goto again;
        }

        /* every n'th key is kept in the linear cache */
        if (!lc_off) {
                ext_key = AFFS_I(inode)->i_lc[lc_idx];
                goto read_ext;
        }

        /* maybe it's still in the associative cache */
        ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
        if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
                ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
                goto read_ext;
        }

        /* try to find one of the previous extended blocks */
        tmp = ext;
        idx = ac_idx;
        while (--tmp, --lc_off > 0) {
                idx = (idx - 1) & AFFS_AC_MASK;
                if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
                        ext_key = AFFS_I(inode)->i_ac[idx].key;
                        goto find_ext;
                }
        }

        /* fall back to the linear cache */
        ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
        /* read all extended blocks until we find the one we need */
        /* unlock cache */
        do {
                bh = affs_bread(sb, ext_key);
                if (!bh)
                        goto err_bread;
                ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
                affs_brelse(bh);
                tmp++;
        } while (tmp < ext);
        /* lock cache */

        /* store it in the associative cache */
        /* recalculate ac_idx? */
        AFFS_I(inode)->i_ac[ac_idx].ext = ext;
        AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
        /* finally read the right extended block */
        /* unlock cache */
        bh = affs_bread(sb, ext_key);
        if (!bh)
                goto err_bread;
        /* lock cache */

store_ext:
        /* release old cached extended block and store the new one */
        affs_brelse(AFFS_I(inode)->i_ext_bh);
        AFFS_I(inode)->i_ext_last = ext;
        AFFS_I(inode)->i_ext_bh = bh;
        get_bh(bh);

        return bh;

err_bread:
        affs_brelse(bh);
        return ERR_PTR(-EIO);
}

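/*
 * get_block callback for the FFS variant:  translate a file block
 * number into (extension block, slot) coordinates, map the key stored
 * in that slot into bh_result, and, on the write path (create != 0),
 * allocate a fresh block for a position one past the current end of
 * the file.
 */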
static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *ext_bh;
        u32 ext;

        pr_debug("%s(%u, %lu)\n",
                 __func__, (u32)inode->i_ino, (unsigned long)block);

        BUG_ON(block > (sector_t)0x7fffffffUL);

        if (block >= AFFS_I(inode)->i_blkcnt) {
                if (block > AFFS_I(inode)->i_blkcnt || !create)
                        goto err_big;
        } else
                create = 0;

        /* lock cache */
        affs_lock_ext(inode);

        ext = (u32)block / AFFS_SB(sb)->s_hashsize;
        block -= ext * AFFS_SB(sb)->s_hashsize;
        ext_bh = affs_get_extblock(inode, ext);
        if (IS_ERR(ext_bh))
                goto err_ext;
        map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

        if (create) {
                u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
                if (!blocknr)
                        goto err_alloc;
                set_buffer_new(bh_result);
                AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
                AFFS_I(inode)->i_blkcnt++;

                /* store new block */
                if (bh_result->b_blocknr)
                        affs_warning(sb, "get_block", "block already set (%lx)",
                                     (unsigned long)bh_result->b_blocknr);
                AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
                AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
                affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
                bh_result->b_blocknr = blocknr;

                if (!block) {
                        /* insert first block into header block */
                        u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
                        if (tmp)
                                affs_warning(sb, "get_block", "first block already set (%d)", tmp);
                        AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
                        affs_adjust_checksum(ext_bh, blocknr - tmp);
                }
        }

        affs_brelse(ext_bh);
        /* unlock cache */
        affs_unlock_ext(inode);
        return 0;

err_big:
        affs_error(inode->i_sb, "get_block", "strange block request %llu",
                   (unsigned long long)block);
        return -EIO;
err_ext:
        /* unlock cache */
        affs_unlock_ext(inode);
        return PTR_ERR(ext_bh);
err_alloc:
        brelse(ext_bh);
        clear_buffer_mapped(bh_result);
        bh_result->b_bdev = NULL;
        /* unlock cache */
        affs_unlock_ext(inode);
        return -ENOSPC;
}

static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, affs_get_block, wbc);
}

static int affs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, affs_get_block);
}

static void affs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                affs_truncate(inode);
        }
}

static int affs_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        int ret;

        *pagep = NULL;
        ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                               affs_get_block,
                               &AFFS_I(mapping->host)->mmu_private);
        if (unlikely(ret))
                affs_write_failed(mapping, pos + len);

        return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
        .readpage       = affs_readpage,
        .writepage      = affs_writepage,
        .write_begin    = affs_write_begin,
        .write_end      = generic_write_end,
        .bmap           = _affs_bmap,
};

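/*
 * The helpers below run affs_get_block() on a dummy buffer_head just
 * to resolve the block number, then read (or grab) that block through
 * the buffer cache, carrying over the b_state bits (notably BH_New)
 * that affs_get_block() set on the dummy head.
 */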
static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, create);
        if (!err) {
                bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, 1);
        if (!err) {
                bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
        struct buffer_head *bh, tmp_bh;
        int err;

        tmp_bh.b_state = 0;
        err = affs_get_block(inode, block, &tmp_bh, 1);
        if (!err) {
                bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
                if (bh) {
                        bh->b_state |= tmp_bh.b_state;
                        return bh;
                }
                err = -EIO;
        }
        return ERR_PTR(err);
}

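/*
 * Fill the first "to" bytes of a page from an OFS file.  OFS data
 * blocks carry a header, so the payload (AFFS_DATA()) is smaller than
 * the device block and the page cannot simply be mapped;  each
 * covered data block is read and its payload copied into place.
 */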
static int
affs_do_readpage_ofs(struct page *page, unsigned to)
{
        struct inode *inode = page->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh;
        char *data;
        unsigned pos = 0;
        u32 bidx, boff, bsize;
        u32 tmp;

        pr_debug("%s(%u, %lu, 0, %d)\n", __func__, (u32)inode->i_ino,
                 page->index, to);
        BUG_ON(to > PAGE_CACHE_SIZE);
        kmap(page);
        data = page_address(page);
        bsize = AFFS_SB(sb)->s_data_blksize;
        tmp = page->index << PAGE_CACHE_SHIFT;
        bidx = tmp / bsize;
        boff = tmp % bsize;

        while (pos < to) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh)) {
                        /* don't leak the kmap taken above */
                        kunmap(page);
                        return PTR_ERR(bh);
                }
                tmp = min(bsize - boff, to - pos);
                BUG_ON(pos + tmp > to || tmp > bsize);
                memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
                affs_brelse(bh);
                bidx++;
                pos += tmp;
                boff = 0;
        }
        flush_dcache_page(page);
        kunmap(page);
        return 0;
}

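/*
 * Extend an OFS file with zeroed data blocks up to newsize:  top up
 * the partial last block, then allocate, initialize and chain fresh
 * T_DATA blocks (header: ptype, file header key, sequence number,
 * payload size, pointer to the next data block, checksum).
 */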
static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh, *prev_bh;
        u32 bidx, boff;
        u32 size, bsize;
        u32 tmp;

        pr_debug("%s(%u, %d)\n", __func__, (u32)inode->i_ino, newsize);
        bsize = AFFS_SB(sb)->s_data_blksize;
        bh = NULL;
        size = AFFS_I(inode)->mmu_private;
        bidx = size / bsize;
        boff = size % bsize;

        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, newsize - size);
                BUG_ON(boff + tmp > bsize || tmp > bsize);
                memset(AFFS_DATA(bh) + boff, 0, tmp);
                be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                size += tmp;
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
        }

        while (size < newsize) {
                prev_bh = bh;
                bh = affs_getzeroblk_ino(inode, bidx);
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, newsize - size);
                BUG_ON(tmp > bsize);
                AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                affs_fix_checksum(sb, bh);
                bh->b_state &= ~(1UL << BH_New);
                mark_buffer_dirty_inode(bh, inode);
                if (prev_bh) {
                        u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                        if (tmp)
                                affs_warning(sb, "extent_file_ofs",
                                             "next block already set for %d (%d)", bidx, tmp);
                        AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                        affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                        mark_buffer_dirty_inode(prev_bh, inode);
                        affs_brelse(prev_bh);
                }
                size += bsize;
                bidx++;
        }
        affs_brelse(bh);
        inode->i_size = AFFS_I(inode)->mmu_private = newsize;
        return 0;

out:
        /* release the buffer still held from the previous iteration */
        affs_brelse(prev_bh);
        inode->i_size = AFFS_I(inode)->mmu_private = newsize;
        return PTR_ERR(bh);
}

static int
affs_readpage_ofs(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        u32 to;
        int err;

        pr_debug("%s(%u, %lu)\n", __func__, (u32)inode->i_ino, page->index);
        to = PAGE_CACHE_SIZE;
        if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) {
                to = inode->i_size & ~PAGE_CACHE_MASK;
                memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to);
        }

        err = affs_do_readpage_ofs(page, to);
        if (!err)
                SetPageUptodate(page);
        unlock_page(page);
        return err;
}

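/*
 * write_begin for the OFS variant.  When writing beyond the current
 * end of the file, the file is extended with zeroed blocks first,
 * because OFS data blocks are chained and must exist in sequence.
 */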
static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        struct page *page;
        pgoff_t index;
        int err = 0;

        pr_debug("%s(%u, %llu, %llu)\n", __func__, (u32)inode->i_ino,
                 (unsigned long long)pos, (unsigned long long)pos + len);
        if (pos > AFFS_I(inode)->mmu_private) {
                /* XXX: this probably leaves a too-big i_size in case of
                 * failure. Should really be updating i_size at write_end time
                 */
                err = affs_extent_file_ofs(inode, pos);
                if (err)
                        return err;
        }

        index = pos >> PAGE_CACHE_SHIFT;
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        if (PageUptodate(page))
                return 0;

        /* XXX: inefficient but safe in the face of short writes */
        err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE);
        if (err) {
                unlock_page(page);
                page_cache_release(page);
        }
        return err;
}

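/*
 * write_end for the OFS variant:  copy the page contents into the
 * chained data blocks in three steps -- finish the partial block at
 * "from", stream whole blocks, then write the trailing partial block
 * -- updating sizes, next pointers and checksums as it goes.
 */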
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                              loff_t pos, unsigned len, unsigned copied,
                              struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bh, *prev_bh;
        char *data;
        u32 bidx, boff, bsize;
        unsigned from, to;
        u32 tmp;
        int written;

        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
        /*
         * XXX: not sure if this can handle short copies (len < copied), but
         * we don't have to, because the page should always be uptodate here,
         * due to write_begin.
         */

        pr_debug("%s(%u, %llu, %llu)\n",
                 __func__, (u32)inode->i_ino, (unsigned long long)pos,
                 (unsigned long long)pos + len);
        bsize = AFFS_SB(sb)->s_data_blksize;
        data = page_address(page);

        bh = NULL;
        written = 0;
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        bidx = tmp / bsize;
        boff = tmp % bsize;
        if (boff) {
                bh = affs_bread_ino(inode, bidx, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - from);
                BUG_ON(boff + tmp > bsize || tmp > bsize);
                memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
                be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
                from += tmp;
                bidx++;
        } else if (bidx) {
                bh = affs_bread_ino(inode, bidx - 1, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
        }

        while (from + bsize <= to) {
                prev_bh = bh;
                bh = affs_getemptyblk_ino(inode, bidx);
                if (IS_ERR(bh))
                        goto out;
                memcpy(AFFS_DATA(bh), data + from, bsize);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                        AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                        AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        bh->b_state &= ~(1UL << BH_New);
                        if (prev_bh) {
                                u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                                if (tmp)
                                        affs_warning(sb, "commit_write_ofs",
                                                     "next block already set for %d (%d)",
                                                     bidx, tmp);
                                AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                                affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                                mark_buffer_dirty_inode(prev_bh, inode);
                        }
                }
                affs_brelse(prev_bh);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += bsize;
                from += bsize;
                bidx++;
        }

        if (from < to) {
                prev_bh = bh;
                bh = affs_bread_ino(inode, bidx, 1);
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, to - from);
                BUG_ON(tmp > bsize);
                memcpy(AFFS_DATA(bh), data + from, tmp);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                        AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                        AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        bh->b_state &= ~(1UL << BH_New);
                        if (prev_bh) {
                                u32 tmp = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);
                                if (tmp)
                                        affs_warning(sb, "commit_write_ofs",
                                                     "next block already set for %d (%d)",
                                                     bidx, tmp);
                                AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
                                affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp);
                                mark_buffer_dirty_inode(prev_bh, inode);
                        }
                } else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
                        AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
                affs_brelse(prev_bh);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
                from += tmp;
                bidx++;
        }
        SetPageUptodate(page);

done:
        affs_brelse(bh);
        tmp = (page->index << PAGE_CACHE_SHIFT) + from;
        if (tmp > inode->i_size)
                inode->i_size = AFFS_I(inode)->mmu_private = tmp;

        unlock_page(page);
        page_cache_release(page);

        return written;

out:
        /* record the error from the failed block before handing the
         * previous (still referenced) one to the common exit path */
        if (!written)
                written = PTR_ERR(bh);
        bh = prev_bh;
        goto done;
}

const struct address_space_operations affs_aops_ofs = {
        .readpage       = affs_readpage_ofs,
        //.writepage    = affs_writepage_ofs,
        .write_begin    = affs_write_begin_ofs,
        .write_end      = affs_write_end_ofs,
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);

        while (AFFS_I(inode)->i_pa_cnt) {
                AFFS_I(inode)->i_pa_cnt--;
                affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
        }
}

/* Truncate (or enlarge) a file to the requested size. */

void
affs_truncate(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        u32 ext, ext_key;
        u32 last_blk, blkcnt, blk;
        u32 size;
        struct buffer_head *ext_bh;
        int i;

        pr_debug("truncate(inode=%d, oldsize=%u, newsize=%u)\n",
                 (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size);

        last_blk = 0;
        ext = 0;
        if (inode->i_size) {
                last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
                ext = last_blk / AFFS_SB(sb)->s_hashsize;
        }

        if (inode->i_size > AFFS_I(inode)->mmu_private) {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
                void *fsdata;
                loff_t size = inode->i_size;
                int res;

                res = mapping->a_ops->write_begin(NULL, mapping, size, 0, 0, &page, &fsdata);
                if (!res)
                        res = mapping->a_ops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
                else
                        inode->i_size = AFFS_I(inode)->mmu_private;
                mark_inode_dirty(inode);
                return;
        } else if (inode->i_size == AFFS_I(inode)->mmu_private)
                return;

        /* lock cache */
        ext_bh = affs_get_extblock(inode, ext);
        if (IS_ERR(ext_bh)) {
                affs_warning(sb, "truncate", "unexpected read error for ext block %u (%ld)",
                             ext, PTR_ERR(ext_bh));
                return;
        }
        if (AFFS_I(inode)->i_lc) {
                /* clear linear cache */
                i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
                if (AFFS_I(inode)->i_lc_size > i) {
                        AFFS_I(inode)->i_lc_size = i;
                        for (; i < AFFS_LC_SIZE; i++)
                                AFFS_I(inode)->i_lc[i] = 0;
                }
                /* clear associative cache */
                for (i = 0; i < AFFS_AC_SIZE; i++)
                        if (AFFS_I(inode)->i_ac[i].ext >= ext)
                                AFFS_I(inode)->i_ac[i].ext = 0;
        }
        ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

        blkcnt = AFFS_I(inode)->i_blkcnt;
        i = 0;
        blk = last_blk;
        if (inode->i_size) {
                i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
                blk++;
        } else
                AFFS_HEAD(ext_bh)->first_data = 0;
        AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
        size = AFFS_SB(sb)->s_hashsize;
        if (size > blkcnt - blk + i)
                size = blkcnt - blk + i;
        for (; i < size; i++, blk++) {
                affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
                AFFS_BLOCK(sb, ext_bh, i) = 0;
        }
        AFFS_TAIL(sb, ext_bh)->extension = 0;
        affs_fix_checksum(sb, ext_bh);
        mark_buffer_dirty_inode(ext_bh, inode);
        affs_brelse(ext_bh);

        if (inode->i_size) {
                AFFS_I(inode)->i_blkcnt = last_blk + 1;
                AFFS_I(inode)->i_extcnt = ext + 1;
                if (AFFS_SB(sb)->s_flags & SF_OFS) {
                        struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
                        u32 tmp;
                        if (IS_ERR(bh)) {
                                affs_warning(sb, "truncate", "unexpected read error for last block %u (%ld)",
                                             last_blk, PTR_ERR(bh));
                                return;
                        }
                        tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
                        AFFS_DATA_HEAD(bh)->next = 0;
                        affs_adjust_checksum(bh, -tmp);
                        affs_brelse(bh);
                }
        } else {
                AFFS_I(inode)->i_blkcnt = 0;
                AFFS_I(inode)->i_extcnt = 1;
        }
        AFFS_I(inode)->mmu_private = inode->i_size;
        /* unlock cache */

        while (ext_key) {
                ext_bh = affs_bread(sb, ext_key);
                if (!ext_bh) {
                        affs_warning(sb, "truncate",
                                     "unexpected read error for ext block %u", ext_key);
                        break;
                }
                size = AFFS_SB(sb)->s_hashsize;
                if (size > blkcnt - blk)
                        size = blkcnt - blk;
                for (i = 0; i < size; i++, blk++)
                        affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
                affs_free_block(sb, ext_key);
                ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
                affs_brelse(ext_bh);
        }
        affs_free_prealloc(inode);
}

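/*
 * fsync:  flush dirty pages in the given range, then write the inode
 * itself and force the underlying block device out, under i_mutex.
 */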
int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = filp->f_mapping->host;
        int ret, err;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        mutex_lock(&inode->i_mutex);
        ret = write_inode_now(inode, 0);
        err = sync_blockdev(inode->i_sb->s_bdev);
        if (!ret)
                ret = err;
        mutex_unlock(&inode->i_mutex);
        return ret;
}