/*
 * fs/logfs/dev_bdev.c - Device access methods for block devices
 *
 * As should be obvious for Linux kernel code, license is GPLv2
 *
 * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
 */
#include "logfs.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/prefetch.h>

#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))

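/*
 * Synchronously read or write a single page, using a bio and bio_vec on
 * the stack.  The target sector is derived from page->index, i.e. the
 * page is assumed to come from the 1:1 device mapping (the mapping inode).
 */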
static int sync_request(struct page *page, struct block_device *bdev, int op)
{
	struct bio bio;
	struct bio_vec bio_vec;

	bio_init(&bio, &bio_vec, 1);
	bio.bi_bdev = bdev;
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
	bio_set_op_attrs(&bio, op, 0);

	return submit_bio_wait(&bio);
}

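/*
 * Read one page from the device.  This follows ->readpage()/filler_t
 * conventions: set or clear PG_uptodate and PG_error, then unlock the
 * page, so it can also serve as the filler for read_cache_page() below.
 */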
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}

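/*
 * Every asynchronous write bio bumps super->s_pending_writes before it is
 * submitted; the completion handlers drop the count again and wake this
 * queue, which lets bdev_sync() wait for all outstanding writes.
 */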
static DECLARE_WAIT_QUEUE_HEAD(wq);

static void writeseg_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */

	bio_for_each_segment_all(bvec, bio, i) {
		end_page_writeback(bvec->bv_page);
		put_page(bvec->bv_page);
	}
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

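/*
 * Write nr_pages pages of the mapping inode, starting at page cache index
 * @index, to the device at byte offset @ofs.  Pages are packed into bios
 * of at most BIO_MAX_PAGES; whenever bio_add_page() refuses another page,
 * the current bio is submitted and a new one is started at the offset
 * reached so far.
 */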
static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	struct bio *bio = NULL;
	struct page *page;
	unsigned int max_pages;
	int i, ret;

	max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);

	for (i = 0; i < nr_pages; i++) {
		if (!bio) {
			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);

			bio->bi_bdev = super->s_bdev;
			bio->bi_iter.bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = writeseg_end_io;
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		}
		page = find_lock_page(mapping, index + i);
		BUG_ON(!page);
		ret = bio_add_page(bio, page, PAGE_SIZE, 0);

		BUG_ON(PageWriteback(page));
		set_page_writeback(page);
		unlock_page(page);

		if (!ret) {
			/* Block layer cannot split bios :( */
			ofs += bio->bi_iter.bi_size;
			atomic_inc(&super->s_pending_writes);
			submit_bio(bio);
			bio = NULL;
		}
	}
	if (bio) {
		atomic_inc(&super->s_pending_writes);
		submit_bio(bio);
	}
	return 0;
}

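/*
 * Write out the byte range [ofs, ofs + len), rounded out to whole pages,
 * through __bdev_writeseg().
 */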
static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
{
	struct logfs_super *super = logfs_super(sb);
	int head;

	BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);

	if (len == 0) {
		/* This can happen when the object fit perfectly into a
		 * segment, the segment gets written per sync and subsequently
		 * closed.
		 */
		return;
	}
	head = ofs & (PAGE_SIZE - 1);
	if (head) {
		ofs -= head;
		len += head;
	}
	len = PAGE_ALIGN(len);
	__bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
}

static void erase_end_io(struct bio *bio)
{
	struct super_block *sb = bio->bi_private;
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
	bio_put(bio);
	if (atomic_dec_and_test(&super->s_pending_writes))
		wake_up(&wq);
}

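/*
 * Emulate an erase by writing the shared erase page (super->s_erase_page)
 * over every page of the range.  Bios are built and submitted the same
 * way as in __bdev_writeseg().
 */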
static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
		size_t nr_pages)
{
	struct logfs_super *super = logfs_super(sb);
	struct bio *bio = NULL;
	unsigned int max_pages;
	int i, ret;

	max_pages = min_t(size_t, nr_pages, BIO_MAX_PAGES);

	for (i = 0; i < nr_pages; i++) {
		if (!bio) {
			bio = bio_alloc(GFP_NOFS, max_pages);
			BUG_ON(!bio);

			bio->bi_bdev = super->s_bdev;
			bio->bi_iter.bi_sector = ofs >> 9;
			bio->bi_private = sb;
			bio->bi_end_io = erase_end_io;
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		}
		ret = bio_add_page(bio, super->s_erase_page, PAGE_SIZE, 0);
		if (!ret) {
			/* Block layer cannot split bios :( */
			ofs += bio->bi_iter.bi_size;
			atomic_inc(&super->s_pending_writes);
			submit_bio(bio);
			/* Start a fresh bio for the remaining pages */
			bio = NULL;
		}
	}
	if (bio) {
		atomic_inc(&super->s_pending_writes);
		submit_bio(bio);
	}
	return 0;
}

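/*
 * Block devices have no native erase operation, so this only does work
 * when the caller insists (ensure_write); in that case the range is
 * overwritten with the erase page via do_erase().
 */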
static int bdev_erase(struct super_block *sb, loff_t to, size_t len,
		int ensure_write)
{
	struct logfs_super *super = logfs_super(sb);

	BUG_ON(to & (PAGE_SIZE - 1));
	BUG_ON(len & (PAGE_SIZE - 1));

	if (super->s_flags & LOGFS_SB_FLAG_RO)
		return -EROFS;

	if (ensure_write) {
		/*
		 * Object store doesn't care whether erases happen or not.
		 * But for the journal they are required. Otherwise a scan
		 * can find an old commit entry and assume it is the current
		 * one, travelling back in time.
		 */
		do_erase(sb, to, to >> PAGE_SHIFT, len >> PAGE_SHIFT);
	}
	return 0;
}

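/*
 * Wait until every write submitted through __bdev_writeseg() or do_erase()
 * has completed.
 */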
static void bdev_sync(struct super_block *sb)
{
	struct logfs_super *super = logfs_super(sb);

	wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
}

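/*
 * The first and the last on-disk superblock sit at offset 0 and in the
 * last complete 4KiB block of the device, respectively; both are read
 * through the mapping inode's page cache, with bdev_readpage() as filler.
 */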
static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;

	*ofs = 0;
	return read_cache_page(mapping, 0, filler, sb);
}

static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = bdev_readpage;
	u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
	pgoff_t index = pos >> PAGE_SHIFT;

	*ofs = pos;
	return read_cache_page(mapping, index, filler, sb);
}

static int bdev_write_sb(struct super_block *sb, struct page *page)
{
	struct block_device *bdev = logfs_super(sb)->s_bdev;

	/* Nothing special to do for block devices. */
	return sync_request(page, bdev, WRITE);
}

static void bdev_put_device(struct logfs_super *s)
{
	blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

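/*
 * ->can_write_buf reports whether a partially filled write buffer may be
 * flushed at @ofs.  For block devices the answer is always no.
 */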
static int bdev_can_write_buf(struct super_block *sb, u64 ofs)
{
	return 0;
}

static const struct logfs_device_ops bd_devops = {
	.find_first_sb	= bdev_find_first_sb,
	.find_last_sb	= bdev_find_last_sb,
	.write_sb	= bdev_write_sb,
	.readpage	= bdev_readpage,
	.writeseg	= bdev_writeseg,
	.erase		= bdev_erase,
	.can_write_buf	= bdev_can_write_buf,
	.sync		= bdev_sync,
	.put_device	= bdev_put_device,
};

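/*
 * Claim the block device for logfs and hook up the block device ops.
 * mtdblock devices are detected by major number and handed over to the
 * MTD backend (logfs_get_sb_mtd) instead.
 */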
int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
		const char *devname)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  type);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		int mtdnr = MINOR(bdev->bd_dev);

		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		return logfs_get_sb_mtd(p, mtdnr);
	}

	p->s_bdev = bdev;
	p->s_mtd = NULL;
	p->s_devops = &bd_devops;
	return 0;
}