/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"
  12. void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
  13. {
  14. struct buffer_head *bh;
  15. struct blk_plug plug;
  16. if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
  17. return;
  18. bh = sb_find_get_block(s, secno);
  19. if (bh) {
  20. if (buffer_uptodate(bh)) {
  21. brelse(bh);
  22. return;
  23. }
  24. brelse(bh);
  25. };
  26. blk_start_plug(&plug);
  27. while (n > 0) {
  28. if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
  29. break;
  30. sb_breadahead(s, secno);
  31. secno++;
  32. n--;
  33. }
  34. blk_finish_plug(&plug);
  35. }
  36. /* Map a sector into a buffer and return pointers to it and to the buffer. */
  37. void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
  38. int ahead)
  39. {
  40. struct buffer_head *bh;
  41. hpfs_lock_assert(s);
  42. hpfs_prefetch_sectors(s, secno, ahead);
  43. cond_resched();
  44. *bhp = bh = sb_bread(s, secno);
  45. if (bh != NULL)
  46. return bh->b_data;
  47. else {
  48. pr_err("%s(): read error\n", __func__);
  49. return NULL;
  50. }
  51. }
  52. /* Like hpfs_map_sector but don't read anything */
  53. void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
  54. {
  55. struct buffer_head *bh;
  56. /*return hpfs_map_sector(s, secno, bhp, 0);*/
  57. hpfs_lock_assert(s);
  58. cond_resched();
  59. if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
  60. if (!buffer_uptodate(bh)) wait_on_buffer(bh);
  61. set_buffer_uptodate(bh);
  62. return bh->b_data;
  63. } else {
  64. pr_err("%s(): getblk failed\n", __func__);
  65. return NULL;
  66. }
  67. }
  68. /* Map 4 sectors into a 4buffer and return pointers to it and to the buffer. */
  69. void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
  70. int ahead)
  71. {
  72. char *data;
  73. hpfs_lock_assert(s);
  74. cond_resched();
  75. if (secno & 3) {
  76. pr_err("%s(): unaligned read\n", __func__);
  77. return NULL;
  78. }
  79. hpfs_prefetch_sectors(s, secno, 4 + ahead);
  80. if (!(qbh->bh[0] = sb_bread(s, secno + 0))) goto bail0;
  81. if (!(qbh->bh[1] = sb_bread(s, secno + 1))) goto bail1;
  82. if (!(qbh->bh[2] = sb_bread(s, secno + 2))) goto bail2;
  83. if (!(qbh->bh[3] = sb_bread(s, secno + 3))) goto bail3;
  84. if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
  85. likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
  86. likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
  87. return qbh->data = qbh->bh[0]->b_data;
  88. }
  89. qbh->data = data = kmalloc(2048, GFP_NOFS);
  90. if (!data) {
  91. pr_err("%s(): out of memory\n", __func__);
  92. goto bail4;
  93. }
  94. memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
  95. memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
  96. memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
  97. memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);
  98. return data;
  99. bail4:
  100. brelse(qbh->bh[3]);
  101. bail3:
  102. brelse(qbh->bh[2]);
  103. bail2:
  104. brelse(qbh->bh[1]);
  105. bail1:
  106. brelse(qbh->bh[0]);
  107. bail0:
  108. return NULL;
  109. }
  110. /* Don't read sectors */
  111. void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
  112. struct quad_buffer_head *qbh)
  113. {
  114. cond_resched();
  115. hpfs_lock_assert(s);
  116. if (secno & 3) {
  117. pr_err("%s(): unaligned read\n", __func__);
  118. return NULL;
  119. }
  120. if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
  121. if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
  122. if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
  123. if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;
  124. if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
  125. likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
  126. likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
  127. return qbh->data = qbh->bh[0]->b_data;
  128. }
  129. if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
  130. pr_err("%s(): out of memory\n", __func__);
  131. goto bail4;
  132. }
  133. return qbh->data;
  134. bail4:
  135. brelse(qbh->bh[3]);
  136. bail3:
  137. brelse(qbh->bh[2]);
  138. bail2:
  139. brelse(qbh->bh[1]);
  140. bail1:
  141. brelse(qbh->bh[0]);
  142. bail0:
  143. return NULL;
  144. }
  145. void hpfs_brelse4(struct quad_buffer_head *qbh)
  146. {
  147. if (unlikely(qbh->data != qbh->bh[0]->b_data))
  148. kfree(qbh->data);
  149. brelse(qbh->bh[0]);
  150. brelse(qbh->bh[1]);
  151. brelse(qbh->bh[2]);
  152. brelse(qbh->bh[3]);
  153. }
  154. void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
  155. {
  156. if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
  157. memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
  158. memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
  159. memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
  160. memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
  161. }
  162. mark_buffer_dirty(qbh->bh[0]);
  163. mark_buffer_dirty(qbh->bh[1]);
  164. mark_buffer_dirty(qbh->bh[2]);
  165. mark_buffer_dirty(qbh->bh[3]);
  166. }