/* drivers/dax/pmem.c */
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/percpu-refcount.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
#include "device-dax.h"
/*
 * Per-device driver state: ties the lifetime of the dev_pagemap created by
 * devm_memremap_pages() to a percpu reference count, with a completion so
 * teardown can wait for the final reference to drop.
 */
struct dax_pmem {
	struct device *dev;		/* the probed nd_dax device */
	struct percpu_ref ref;		/* page-map usage refcount */
	struct dev_pagemap pgmap;	/* descriptor for devm_memremap_pages() */
	struct completion cmp;		/* signalled from the ref release callback */
};
  26. static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
  27. {
  28. return container_of(ref, struct dax_pmem, ref);
  29. }
  30. static void dax_pmem_percpu_release(struct percpu_ref *ref)
  31. {
  32. struct dax_pmem *dax_pmem = to_dax_pmem(ref);
  33. dev_dbg(dax_pmem->dev, "trace\n");
  34. complete(&dax_pmem->cmp);
  35. }
  36. static void dax_pmem_percpu_exit(void *data)
  37. {
  38. struct percpu_ref *ref = data;
  39. struct dax_pmem *dax_pmem = to_dax_pmem(ref);
  40. dev_dbg(dax_pmem->dev, "trace\n");
  41. wait_for_completion(&dax_pmem->cmp);
  42. percpu_ref_exit(ref);
  43. }
  44. static void dax_pmem_percpu_kill(void *data)
  45. {
  46. struct percpu_ref *ref = data;
  47. struct dax_pmem *dax_pmem = to_dax_pmem(ref);
  48. dev_dbg(dax_pmem->dev, "trace\n");
  49. percpu_ref_kill(ref);
  50. }
/*
 * Probe a PMEM namespace configured for device-DAX: parse the 'pfn' info
 * block, remap the namespace memory via devm_memremap_pages(), then register
 * a dax_region and a child dev_dax exposing the data area.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the percpu_ref unwind ordering below is deliberate and
 * fragile.  devm actions run in reverse registration order at teardown, so
 * the kill action (registered last) runs before the exit action — the exit
 * action's wait_for_completion() only finishes after the ref has been
 * killed and drained.  The manual percpu_ref_exit() calls on the error
 * paths exist because the exit action cannot be used there: the ref was
 * never killed, so it would wait forever.
 */
static int dax_pmem_probe(struct device *dev)
{
	void *addr;
	struct resource res;
	int rc, id, region_id;
	struct nd_pfn_sb *pfn_sb;
	struct dev_dax *dev_dax;
	struct dax_pmem *dax_pmem;
	struct nd_namespace_io *nsio;
	struct dax_region *dax_region;
	struct nd_namespace_common *ndns;
	struct nd_dax *nd_dax = to_nd_dax(dev);
	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);
	nsio = to_nd_namespace_io(&ndns->dev);

	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
	if (!dax_pmem)
		return -ENOMEM;

	/* parse the 'pfn' info block via ->rw_bytes */
	rc = devm_nsio_enable(dev, nsio);
	if (rc)
		return rc;
	rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap);
	if (rc)
		return rc;
	/* raw namespace access is no longer needed once the info block is read */
	devm_nsio_disable(dev, nsio);

	pfn_sb = nd_pfn->pfn_sb;

	if (!devm_request_mem_region(dev, nsio->res.start,
				resource_size(&nsio->res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
		return -EBUSY;
	}

	dax_pmem->dev = dev;
	init_completion(&dax_pmem->cmp);
	rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
			GFP_KERNEL);
	if (rc)
		return rc;

	rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
	if (rc) {
		/* action not registered: tear the live ref down by hand */
		percpu_ref_exit(&dax_pmem->ref);
		return rc;
	}

	dax_pmem->pgmap.ref = &dax_pmem->ref;
	addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
	if (IS_ERR(addr)) {
		/* the exit action would deadlock on an unkilled ref; undo it */
		devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
		percpu_ref_exit(&dax_pmem->ref);
		return PTR_ERR(addr);
	}

	/* registered after the exit action so kill runs first at teardown */
	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
			&dax_pmem->ref);
	if (rc)
		return rc;

	/* adjust the dax_region resource to the start of data */
	memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
	res.start += le64_to_cpu(pfn_sb->dataoff);

	/* derive region/namespace ids from the device name, e.g. namespace2.0 */
	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
	if (rc != 2)
		return -EINVAL;

	dax_region = alloc_dax_region(dev, region_id, &res,
			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
	if (!dax_region)
		return -ENOMEM;

	/* TODO: support for subdividing a dax region... */
	dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);

	/* child dev_dax instances now own the lifetime of the dax_region */
	dax_region_put(dax_region);

	return PTR_ERR_OR_ZERO(dev_dax);
}
/* nvdimm-bus driver binding to namespaces claimed for device-DAX. */
static struct nd_device_driver dax_pmem_driver = {
	.probe = dax_pmem_probe,
	.drv = {
		.name = "dax_pmem",
	},
	.type = ND_DRIVER_DAX_PMEM,
};

module_nd_driver(dax_pmem_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);