  1. /*
  2. * Persistent Memory Driver
  3. *
  4. * Copyright (c) 2014, Intel Corporation.
  5. * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
  6. * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms and conditions of the GNU General Public License,
  10. * version 2, as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope it will be useful, but WITHOUT
  13. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  15. * more details.
  16. */
  17. #include <asm/cacheflush.h>
  18. #include <linux/blkdev.h>
  19. #include <linux/hdreg.h>
  20. #include <linux/init.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/module.h>
  23. #include <linux/moduleparam.h>
  24. #include <linux/slab.h>
  25. #define PMEM_MINORS 16
  26. struct pmem_device {
  27. struct request_queue *pmem_queue;
  28. struct gendisk *pmem_disk;
  29. /* One contiguous memory region per device */
  30. phys_addr_t phys_addr;
  31. void *virt_addr;
  32. size_t size;
  33. };
  34. static int pmem_major;
  35. static atomic_t pmem_index;
  36. static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
  37. unsigned int len, unsigned int off, int rw,
  38. sector_t sector)
  39. {
  40. void *mem = kmap_atomic(page);
  41. size_t pmem_off = sector << 9;
  42. if (rw == READ) {
  43. memcpy(mem + off, pmem->virt_addr + pmem_off, len);
  44. flush_dcache_page(page);
  45. } else {
  46. flush_dcache_page(page);
  47. memcpy(pmem->virt_addr + pmem_off, mem + off, len);
  48. }
  49. kunmap_atomic(mem);
  50. }
  51. static void pmem_make_request(struct request_queue *q, struct bio *bio)
  52. {
  53. struct block_device *bdev = bio->bi_bdev;
  54. struct pmem_device *pmem = bdev->bd_disk->private_data;
  55. int rw;
  56. struct bio_vec bvec;
  57. sector_t sector;
  58. struct bvec_iter iter;
  59. int err = 0;
  60. if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) {
  61. err = -EIO;
  62. goto out;
  63. }
  64. BUG_ON(bio->bi_rw & REQ_DISCARD);
  65. rw = bio_data_dir(bio);
  66. sector = bio->bi_iter.bi_sector;
  67. bio_for_each_segment(bvec, bio, iter) {
  68. pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
  69. rw, sector);
  70. sector += bvec.bv_len >> 9;
  71. }
  72. out:
  73. bio_endio(bio, err);
  74. }
  75. static int pmem_rw_page(struct block_device *bdev, sector_t sector,
  76. struct page *page, int rw)
  77. {
  78. struct pmem_device *pmem = bdev->bd_disk->private_data;
  79. pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
  80. page_endio(page, rw & WRITE, 0);
  81. return 0;
  82. }
  83. static long pmem_direct_access(struct block_device *bdev, sector_t sector,
  84. void **kaddr, unsigned long *pfn, long size)
  85. {
  86. struct pmem_device *pmem = bdev->bd_disk->private_data;
  87. size_t offset = sector << 9;
  88. if (!pmem)
  89. return -ENODEV;
  90. *kaddr = pmem->virt_addr + offset;
  91. *pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;
  92. return pmem->size - offset;
  93. }
  94. static const struct block_device_operations pmem_fops = {
  95. .owner = THIS_MODULE,
  96. .rw_page = pmem_rw_page,
  97. .direct_access = pmem_direct_access,
  98. };
  99. static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res)
  100. {
  101. struct pmem_device *pmem;
  102. struct gendisk *disk;
  103. int idx, err;
  104. err = -ENOMEM;
  105. pmem = kzalloc(sizeof(*pmem), GFP_KERNEL);
  106. if (!pmem)
  107. goto out;
  108. pmem->phys_addr = res->start;
  109. pmem->size = resource_size(res);
  110. err = -EINVAL;
  111. if (!request_mem_region(pmem->phys_addr, pmem->size, "pmem")) {
  112. dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n", &pmem->phys_addr, pmem->size);
  113. goto out_free_dev;
  114. }
  115. /*
  116. * Map the memory as non-cachable, as we can't write back the contents
  117. * of the CPU caches in case of a crash.
  118. */
  119. err = -ENOMEM;
  120. pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size);
  121. if (!pmem->virt_addr)
  122. goto out_release_region;
  123. pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
  124. if (!pmem->pmem_queue)
  125. goto out_unmap;
  126. blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
  127. blk_queue_max_hw_sectors(pmem->pmem_queue, 1024);
  128. blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
  129. disk = alloc_disk(PMEM_MINORS);
  130. if (!disk)
  131. goto out_free_queue;
  132. idx = atomic_inc_return(&pmem_index) - 1;
  133. disk->major = pmem_major;
  134. disk->first_minor = PMEM_MINORS * idx;
  135. disk->fops = &pmem_fops;
  136. disk->private_data = pmem;
  137. disk->queue = pmem->pmem_queue;
  138. disk->flags = GENHD_FL_EXT_DEVT;
  139. sprintf(disk->disk_name, "pmem%d", idx);
  140. disk->driverfs_dev = dev;
  141. set_capacity(disk, pmem->size >> 9);
  142. pmem->pmem_disk = disk;
  143. add_disk(disk);
  144. return pmem;
  145. out_free_queue:
  146. blk_cleanup_queue(pmem->pmem_queue);
  147. out_unmap:
  148. iounmap(pmem->virt_addr);
  149. out_release_region:
  150. release_mem_region(pmem->phys_addr, pmem->size);
  151. out_free_dev:
  152. kfree(pmem);
  153. out:
  154. return ERR_PTR(err);
  155. }
  156. static void pmem_free(struct pmem_device *pmem)
  157. {
  158. del_gendisk(pmem->pmem_disk);
  159. put_disk(pmem->pmem_disk);
  160. blk_cleanup_queue(pmem->pmem_queue);
  161. iounmap(pmem->virt_addr);
  162. release_mem_region(pmem->phys_addr, pmem->size);
  163. kfree(pmem);
  164. }
  165. static int pmem_probe(struct platform_device *pdev)
  166. {
  167. struct pmem_device *pmem;
  168. struct resource *res;
  169. if (WARN_ON(pdev->num_resources > 1))
  170. return -ENXIO;
  171. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  172. if (!res)
  173. return -ENXIO;
  174. pmem = pmem_alloc(&pdev->dev, res);
  175. if (IS_ERR(pmem))
  176. return PTR_ERR(pmem);
  177. platform_set_drvdata(pdev, pmem);
  178. return 0;
  179. }
  180. static int pmem_remove(struct platform_device *pdev)
  181. {
  182. struct pmem_device *pmem = platform_get_drvdata(pdev);
  183. pmem_free(pmem);
  184. return 0;
  185. }
  186. static struct platform_driver pmem_driver = {
  187. .probe = pmem_probe,
  188. .remove = pmem_remove,
  189. .driver = {
  190. .owner = THIS_MODULE,
  191. .name = "pmem",
  192. },
  193. };
  194. static int __init pmem_init(void)
  195. {
  196. int error;
  197. pmem_major = register_blkdev(0, "pmem");
  198. if (pmem_major < 0)
  199. return pmem_major;
  200. error = platform_driver_register(&pmem_driver);
  201. if (error)
  202. unregister_blkdev(pmem_major, "pmem");
  203. return error;
  204. }
  205. module_init(pmem_init);
  206. static void pmem_exit(void)
  207. {
  208. platform_driver_unregister(&pmem_driver);
  209. unregister_blkdev(pmem_major, "pmem");
  210. }
  211. module_exit(pmem_exit);
  212. MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
  213. MODULE_LICENSE("GPL v2");