/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

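/*
 * Translate a media error at @offset (a device-relative byte offset
 * that includes any info-block padding at @pmem->data_offset) into a
 * badblocks update.  nvdimm_clear_poison() returns the number of bytes
 * actually cleared; anything short of @len is reported as an I/O error.
 */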
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

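/*
 * Copy from a (possibly compound) page into pmem, one kmap_atomic()
 * window at a time.  memcpy_flushcache() pushes the stores past the CPU
 * cache so the data is on its way to the persistence domain once
 * nvdimm_flush() later drains the write queues.
 */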
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		/* stay within the current kmap window on the first pass */
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

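/*
 * The mirror image of write_pmem(): copy from pmem into the page with
 * memcpy_mcsafe(), which returns non-zero instead of raising a fatal
 * machine check when it consumes poison, so a read of bad pmem surfaces
 * as an I/O error rather than a crash.
 */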
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	int rc;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rc = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rc)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

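/*
 * pmem is a bio-based driver: there is no request queue or I/O
 * scheduler involved, each bio is serviced synchronously against the
 * memory mapping.  REQ_PREFLUSH and REQ_FUA are honored by draining the
 * nvdimm write queues before and after the data transfer, respectively.
 */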
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = BLK_STS_OK;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return blk_status_to_errno(rc);
}

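/*
 * Resolve @pgoff (relative to the start of the device data area) to a
 * kernel virtual address and pfn for DAX.  The return value is the
 * number of pages that can be accessed contiguously from *kaddr; when
 * any badblocks exist the answer is conservatively clamped to the
 * requested range so that each extent gets re-checked against the
 * badblocks list.
 */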
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_flushcache(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.copy_from_iter = pmem_copy_from_iter,
};

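/*
 * A DAX consumer (fs/dax, device-dax) typically resolves and uses a
 * mapping along the lines of this sketch (error handling elided, names
 * from include/linux/dax.h):
 *
 *	void *kaddr;
 *	pfn_t pfn;
 *	long avail = dax_direct_access(dax_dev, pgoff, nr_pages,
 *			&kaddr, &pfn);
 *
 *	if (avail > 0)
 *		memcpy_flushcache(kaddr, src, bytes);
 */
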
static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_freeze_queue(void *q)
{
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

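/*
 * Teardown is driven entirely by devm: release actions run in reverse
 * order of registration, so the disk is deleted first, then the queue
 * is frozen to drain in-flight I/O before devm_memremap_pages() is
 * unwound, and the queue itself is cleaned up last.
 */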
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua, wbc;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	wbc = nvdimm_has_cache(nd_region);

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
	if (!q)
		return -ENOMEM;

	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.altmap_valid = false;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, wbc, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	blk_queue_flag_set(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops		= &pmem_fops;
	disk->queue		= q;
	disk->flags		= GENHD_FL_EXT_DEVT;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, wbc);
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");