blk.c

/*
 * NVDIMM Block Window Driver
 * Copyright (c) 2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nd.h>
#include <linux/sizes.h>
#include "nd.h"
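
/*
 * Per-namespace driver state.  @internal_lbasize is the namespace LBA
 * size rounded up to INT_LBASIZE_ALIGNMENT, while @sector_size is the
 * logical block size (512 or 4096) exposed to the block layer; any
 * difference between lbasize and @sector_size is per-sector metadata.
 */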
struct nd_blk_device {
	struct request_queue *queue;
	struct gendisk *disk;
	struct nd_namespace_blk *nsblk;
	struct nd_blk_region *ndbr;
	size_t disk_size;
	u32 sector_size;
	u32 internal_lbasize;
};
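
/* Bytes of per-sector metadata: namespace LBA size minus the data size. */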
static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
{
	return blk_dev->nsblk->lbasize - blk_dev->sector_size;
}
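
/*
 * A BLK namespace may be built from multiple discontiguous DPA ranges.
 * Walk the namespace resources to translate a linear namespace offset
 * into a device offset, returning SIZE_MAX if the request falls outside
 * the namespace or would straddle a resource boundary.
 */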
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
		resource_size_t ns_offset, unsigned int len)
{
	int i;

	for (i = 0; i < nsblk->num_resources; i++) {
		if (ns_offset < resource_size(nsblk->res[i])) {
			if (ns_offset + len > resource_size(nsblk->res[i])) {
				dev_WARN_ONCE(&nsblk->common.dev, 1,
					"illegal request\n");
				return SIZE_MAX;
			}
			return nsblk->res[i]->start + ns_offset;
		}
		ns_offset -= resource_size(nsblk->res[i]);
	}

	dev_WARN_ONCE(&nsblk->common.dev, 1, "request out of range\n");
	return SIZE_MAX;
}
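
/*
 * Read or write the metadata portion of one LBA.  The metadata lives
 * immediately after the sector data within each internal_lbasize-sized
 * slot, and is copied to/from the bio's integrity payload one bio_vec
 * at a time.
 */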
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
		struct bio_integrity_payload *bip, u64 lba, int rw)
{
	unsigned int len = nd_blk_meta_size(blk_dev);
	resource_size_t dev_offset, ns_offset;
	struct nd_namespace_blk *nsblk;
	struct nd_blk_region *ndbr;
	int err = 0;

	nsblk = blk_dev->nsblk;
	ndbr = blk_dev->ndbr;
	ns_offset = lba * blk_dev->internal_lbasize + blk_dev->sector_size;
	dev_offset = to_dev_offset(nsblk, ns_offset, len);
	if (dev_offset == SIZE_MAX)
		return -EIO;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *iobuf;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, so we
		 * can use those directly.
		 */
		cur_len = min(len, bv.bv_len);
		iobuf = kmap_atomic(bv.bv_page);
		err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
				cur_len, rw);
		kunmap_atomic(iobuf);
		if (err)
			return err;

		len -= cur_len;
		dev_offset += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return err;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
		struct bio_integrity_payload *bip, u64 lba, int rw)
{
	return 0;
}
#endif
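
/*
 * Transfer one bio_vec worth of data.  With an integrity payload the
 * transfer is split into sector_size chunks so data and metadata stay
 * interleaved per LBA; without one, the whole bvec (<= PAGE_SIZE) goes
 * to do_io in a single call.
 */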
static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
		struct bio_integrity_payload *bip, struct page *page,
		unsigned int len, unsigned int off, int rw,
		sector_t sector)
{
	struct nd_blk_region *ndbr = blk_dev->ndbr;
	resource_size_t dev_offset, ns_offset;
	int err = 0;
	void *iobuf;
	u64 lba;

	while (len) {
		unsigned int cur_len;

		/*
		 * If we don't have an integrity payload, we don't have to
		 * split the bvec into sectors, as this would cause unnecessary
		 * Block Window setup/move steps.  The do_io routine is capable
		 * of handling len <= PAGE_SIZE.
		 */
		cur_len = bip ? min(len, blk_dev->sector_size) : len;

		lba = div_u64(sector << SECTOR_SHIFT, blk_dev->sector_size);
		ns_offset = lba * blk_dev->internal_lbasize;
		dev_offset = to_dev_offset(blk_dev->nsblk, ns_offset, cur_len);
		if (dev_offset == SIZE_MAX)
			return -EIO;

		iobuf = kmap_atomic(page);
		err = ndbr->do_io(ndbr, dev_offset, iobuf + off, cur_len, rw);
		kunmap_atomic(iobuf);
		if (err)
			return err;

		if (bip) {
			err = nd_blk_rw_integrity(blk_dev, bip, lba, rw);
			if (err)
				return err;
		}
		len -= cur_len;
		off += cur_len;
		sector += blk_dev->sector_size >> SECTOR_SHIFT;
	}

	return err;
}
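
/*
 * Bio-based entry point: this driver has no request queue and services
 * each segment synchronously through the region's do_io routine.
 */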
static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct gendisk *disk = bdev->bd_disk;
	struct bio_integrity_payload *bip;
	struct nd_blk_device *blk_dev;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0, rw;
	bool do_acct;

	/*
	 * bio_integrity_enabled also checks if the bio already has an
	 * integrity payload attached.  If it does, we *don't* do a
	 * bio_integrity_prep here - the payload has been generated by
	 * another kernel subsystem, and we just pass it through.
	 */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		goto out;
	}

	bip = bio_integrity(bio);
	blk_dev = disk->private_data;
	rw = bio_data_dir(bio);
	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		err = nd_blk_do_bvec(blk_dev, bip, bvec.bv_page, len,
				bvec.bv_offset, rw, iter.bi_sector);
		if (err) {
			dev_info(&blk_dev->nsblk->common.dev,
					"io error in %s sector %lld, len %d\n",
					(rw == READ) ? "READ" : "WRITE",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_error = err;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

 out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
}
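
/*
 * Raw byte access used when a BTT instance claims this namespace
 * (wired up as ndns->rw_bytes); offsets here are namespace-relative.
 */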
static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *iobuf, size_t n, int rw)
{
	struct nd_blk_device *blk_dev = dev_get_drvdata(ndns->claim);
	struct nd_namespace_blk *nsblk = blk_dev->nsblk;
	struct nd_blk_region *ndbr = blk_dev->ndbr;
	resource_size_t dev_offset;

	dev_offset = to_dev_offset(nsblk, offset, n);

	if (unlikely(offset + n > blk_dev->disk_size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (dev_offset == SIZE_MAX)
		return -EIO;

	return ndbr->do_io(ndbr, dev_offset, iobuf, n, rw);
}

static const struct block_device_operations nd_blk_fops = {
	.owner = THIS_MODULE,
	.revalidate_disk = nvdimm_revalidate_disk,
};
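
/*
 * Allocate the bio-based queue and gendisk.  The disk is added with
 * zero capacity and only sized once integrity metadata, if any, has
 * been registered, so no I/O can arrive before setup is complete.
 */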
static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
		struct nd_blk_device *blk_dev)
{
	resource_size_t available_disk_size;
	struct gendisk *disk;
	u64 internal_nlba;

	internal_nlba = div_u64(blk_dev->disk_size, blk_dev->internal_lbasize);
	available_disk_size = internal_nlba * blk_dev->sector_size;

	blk_dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!blk_dev->queue)
		return -ENOMEM;

	blk_queue_make_request(blk_dev->queue, nd_blk_make_request);
	blk_queue_max_hw_sectors(blk_dev->queue, UINT_MAX);
	blk_queue_bounce_limit(blk_dev->queue, BLK_BOUNCE_ANY);
	blk_queue_logical_block_size(blk_dev->queue, blk_dev->sector_size);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, blk_dev->queue);

	disk = blk_dev->disk = alloc_disk(0);
	if (!disk) {
		blk_cleanup_queue(blk_dev->queue);
		return -ENOMEM;
	}

	disk->driverfs_dev = &ndns->dev;
	disk->first_minor = 0;
	disk->fops = &nd_blk_fops;
	disk->private_data = blk_dev;
	disk->queue = blk_dev->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, 0);
	add_disk(disk);

	if (nd_blk_meta_size(blk_dev)) {
		int rc = nd_integrity_init(disk, nd_blk_meta_size(blk_dev));

		if (rc) {
			del_gendisk(disk);
			put_disk(disk);
			blk_cleanup_queue(blk_dev->queue);
			return rc;
		}
	}

	set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
	revalidate_disk(disk);
	return 0;
}
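
/*
 * Probe: set up per-namespace state, then either attach an existing
 * BTT, let a new BTT claim the namespace (in which case this device is
 * re-probed as btt-blk, hence -ENXIO), or attach a raw BLK disk.
 */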
static int nd_blk_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;
	struct nd_namespace_blk *nsblk;
	struct nd_blk_device *blk_dev;
	int rc;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	blk_dev = kzalloc(sizeof(*blk_dev), GFP_KERNEL);
	if (!blk_dev)
		return -ENOMEM;

	nsblk = to_nd_namespace_blk(&ndns->dev);
	blk_dev->disk_size = nvdimm_namespace_capacity(ndns);
	blk_dev->ndbr = to_nd_blk_region(dev->parent);
	blk_dev->nsblk = to_nd_namespace_blk(&ndns->dev);
	blk_dev->internal_lbasize = roundup(nsblk->lbasize,
			INT_LBASIZE_ALIGNMENT);
	blk_dev->sector_size = ((nsblk->lbasize >= 4096) ? 4096 : 512);
	dev_set_drvdata(dev, blk_dev);

	ndns->rw_bytes = nd_blk_rw_bytes;
	if (is_nd_btt(dev))
		rc = nvdimm_namespace_attach_btt(ndns);
	else if (nd_btt_probe(ndns, blk_dev) == 0) {
		/* we'll come back as btt-blk */
		rc = -ENXIO;
	} else
		rc = nd_blk_attach_disk(ndns, blk_dev);
	if (rc)
		kfree(blk_dev);
	return rc;
}
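
/* Tear down in reverse order of nd_blk_attach_disk(). */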
static void nd_blk_detach_disk(struct nd_blk_device *blk_dev)
{
	del_gendisk(blk_dev->disk);
	put_disk(blk_dev->disk);
	blk_cleanup_queue(blk_dev->queue);
}

static int nd_blk_remove(struct device *dev)
{
	struct nd_blk_device *blk_dev = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns);
	else
		nd_blk_detach_disk(blk_dev);
	kfree(blk_dev);

	return 0;
}

static struct nd_device_driver nd_blk_driver = {
	.probe = nd_blk_probe,
	.remove = nd_blk_remove,
	.drv = {
		.name = "nd_blk",
	},
	.type = ND_DRIVER_NAMESPACE_BLK,
};

static int __init nd_blk_init(void)
{
	return nd_driver_register(&nd_blk_driver);
}

static void __exit nd_blk_exit(void)
{
	driver_unregister(&nd_blk_driver.drv);
}

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_BLK);
module_init(nd_blk_init);
module_exit(nd_blk_exit);