papr_scm.c

// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "papr-scm: " fmt

#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>

#include <asm/plpar_wrappers.h>

#define BIND_ANY_ADDR (~0ul)
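
/*
 * DIMM commands this driver accepts from libnvdimm. Only the config
 * metadata commands are implemented; everything else is rejected by
 * papr_scm_ndctl() below.
 */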
#define PAPR_SCM_DIMM_CMD_MASK \
        ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
         (1ul << ND_CMD_GET_CONFIG_DATA) | \
         (1ul << ND_CMD_SET_CONFIG_DATA))
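
/*
 * Per-device driver state. drc_index, blocks, block_size and metadata_size
 * come from the device tree; bound_addr is the base address returned by the
 * H_SCM_BIND_MEM hcall; the remaining members track the libnvdimm objects
 * registered for this device.
 */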
struct papr_scm_priv {
        struct platform_device *pdev;
        struct device_node *dn;
        uint32_t drc_index;
        uint64_t blocks;
        uint64_t block_size;
        int metadata_size;
        uint64_t bound_addr;

        struct nvdimm_bus_descriptor bus_desc;
        struct nvdimm_bus *bus;
        struct nvdimm *nvdimm;
        struct resource res;
        struct nd_region *region;
        struct nd_interleave_set nd_set;
};
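
/*
 * Ask the hypervisor to map this device's storage into the system address
 * space. BIND_ANY_ADDR lets the hypervisor pick the location; the chosen
 * base address comes back in ret[1] and is stashed in p->bound_addr.
 */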
static int drc_pmem_bind(struct papr_scm_priv *p)
{
        unsigned long ret[PLPAR_HCALL_BUFSIZE];
        uint64_t rc, token;

        /*
         * When the hypervisor cannot map all the requested memory in a single
         * hcall it returns H_BUSY and we call again with the token until
         * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
         * would leave the system in an undefined state, so we wait.
         */
        token = 0;

        do {
                rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
                                p->blocks, BIND_ANY_ADDR, token);
                token = ret[0];
                cond_resched();
        } while (rc == H_BUSY);

        if (rc) {
                dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
                return -ENXIO;
        }

        p->bound_addr = ret[1];
        dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);

        return 0;
}
static int drc_pmem_unbind(struct papr_scm_priv *p)
{
        unsigned long ret[PLPAR_HCALL_BUFSIZE];
        uint64_t rc, token;

        token = 0;

        /* NB: unbind has the same retry requirements mentioned above */
        do {
                rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
                                p->bound_addr, p->blocks, token);
                token = ret[0];
                cond_resched();
        } while (rc == H_BUSY);

        if (rc)
                dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);

        return !!rc;
}
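
/*
 * Config metadata accessors. The H_SCM_READ_METADATA/H_SCM_WRITE_METADATA
 * hcalls are used one byte at a time here, which is why papr_scm_ndctl()
 * reports max_xfer = 1 for ND_CMD_GET_CONFIG_SIZE.
 */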
static int papr_scm_meta_get(struct papr_scm_priv *p,
                struct nd_cmd_get_config_data_hdr *hdr)
{
        unsigned long data[PLPAR_HCALL_BUFSIZE];
        int64_t ret;

        if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
                return -EINVAL;

        ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
                        hdr->in_offset, 1);

        if (ret == H_PARAMETER) /* bad DRC index */
                return -ENODEV;
        if (ret)
                return -EINVAL; /* other invalid parameter */

        hdr->out_buf[0] = data[0] & 0xff;

        return 0;
}
static int papr_scm_meta_set(struct papr_scm_priv *p,
                struct nd_cmd_set_config_hdr *hdr)
{
        int64_t ret;

        if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
                return -EINVAL;

        ret = plpar_hcall_norets(H_SCM_WRITE_METADATA,
                        p->drc_index, hdr->in_offset, hdr->in_buf[0], 1);

        if (ret == H_PARAMETER) /* bad DRC index */
                return -ENODEV;
        if (ret)
                return -EINVAL; /* other invalid parameter */

        return 0;
}
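
/*
 * ndctl callback wired up through bus_desc.ndctl. Only per-DIMM config
 * commands are handled; bus-level calls (nvdimm == NULL) and anything
 * outside PAPR_SCM_DIMM_CMD_MASK return -EINVAL.
 */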
static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                unsigned int buf_len, int *cmd_rc)
{
        struct nd_cmd_get_config_size *get_size_hdr;
        struct papr_scm_priv *p;

        /* Only dimm-specific calls are supported atm */
        if (!nvdimm)
                return -EINVAL;

        p = nvdimm_provider_data(nvdimm);

        switch (cmd) {
        case ND_CMD_GET_CONFIG_SIZE:
                get_size_hdr = buf;

                get_size_hdr->status = 0;
                get_size_hdr->max_xfer = 1;
                get_size_hdr->config_size = p->metadata_size;
                *cmd_rc = 0;
                break;

        case ND_CMD_GET_CONFIG_DATA:
                *cmd_rc = papr_scm_meta_get(p, buf);
                break;

        case ND_CMD_SET_CONFIG_DATA:
                *cmd_rc = papr_scm_meta_set(p, buf);
                break;

        default:
                return -EINVAL;
        }

        dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);

        return 0;
}
static const struct attribute_group *region_attr_groups[] = {
        &nd_region_attribute_group,
        &nd_device_attribute_group,
        &nd_mapping_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};

static const struct attribute_group *bus_attr_groups[] = {
        &nvdimm_bus_attribute_group,
        NULL,
};

static const struct attribute_group *papr_scm_dimm_groups[] = {
        &nvdimm_attribute_group,
        &nd_device_attribute_group,
        NULL,
};
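
/*
 * Register the libnvdimm objects for this device: the bus, a single DIMM
 * and one pmem region spanning the bound range. The interleave set cookies
 * are filled in from the unit GUID during probe so they are unique per
 * device.
 */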
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
        struct device *dev = &p->pdev->dev;
        struct nd_mapping_desc mapping;
        struct nd_region_desc ndr_desc;
        unsigned long dimm_flags;

        p->bus_desc.ndctl = papr_scm_ndctl;
        p->bus_desc.module = THIS_MODULE;
        p->bus_desc.of_node = p->pdev->dev.of_node;
        p->bus_desc.attr_groups = bus_attr_groups;
        p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

        if (!p->bus_desc.provider_name)
                return -ENOMEM;

        p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
        if (!p->bus) {
                dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
                return -ENXIO;
        }

        dimm_flags = 0;
        set_bit(NDD_ALIASING, &dimm_flags);

        p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
                        dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
        if (!p->nvdimm) {
                dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
                goto err;
        }

        if (nvdimm_bus_check_dimm_count(p->bus, 1))
                goto err;

        /* now add the region */
        memset(&mapping, 0, sizeof(mapping));
        mapping.nvdimm = p->nvdimm;
        mapping.start = 0;
        mapping.size = p->blocks * p->block_size; // XXX: potential overflow?

        memset(&ndr_desc, 0, sizeof(ndr_desc));
        ndr_desc.attr_groups = region_attr_groups;
        ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
        ndr_desc.res = &p->res;
        ndr_desc.of_node = p->dn;
        ndr_desc.provider_data = p;
        ndr_desc.mapping = &mapping;
        ndr_desc.num_mappings = 1;
        ndr_desc.nd_set = &p->nd_set;
        set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);

        p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
        if (!p->region) {
                dev_err(dev, "Error registering region %pR from %pOF\n",
                                ndr_desc.res, p->dn);
                goto err;
        }

        return 0;

err:    nvdimm_bus_unregister(p->bus);
        kfree(p->bus_desc.provider_name);
        return -ENXIO;
}
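
/*
 * Probe: read the geometry and unit GUID from the device tree, bind the
 * region via the hypervisor, then hand it off to libnvdimm. Required DT
 * properties: ibm,my-drc-index, ibm,block-size, ibm,number-of-blocks and
 * ibm,unit-guid; ibm,metadata-size is optional.
 */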
static int papr_scm_probe(struct platform_device *pdev)
{
        struct device_node *dn = pdev->dev.of_node;
        u32 drc_index, metadata_size = 0;
        u64 blocks, block_size;
        struct papr_scm_priv *p;
        const char *uuid_str;
        u64 uuid[2];
        int rc;

        /* check we have all the required DT properties */
        if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
                dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
                dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
                dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
                dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
                return -ENODEV;
        }

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        /* optional DT properties */
        of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);

        p->dn = dn;
        p->drc_index = drc_index;
        p->block_size = block_size;
        p->blocks = blocks;

        /* We just need to ensure that set cookies are unique across devices */
        uuid_parse(uuid_str, (uuid_t *) uuid);
        p->nd_set.cookie1 = uuid[0];
        p->nd_set.cookie2 = uuid[1];

        /* might be zero */
        p->metadata_size = metadata_size;

        p->pdev = pdev;

        /* request the hypervisor to bind this region to somewhere in memory */
        rc = drc_pmem_bind(p);
        if (rc)
                goto err;

        /* setup the resource for the newly bound range */
        p->res.start = p->bound_addr;
        p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
        p->res.name = pdev->name;
        p->res.flags = IORESOURCE_MEM;

        rc = papr_scm_nvdimm_init(p);
        if (rc)
                goto err2;

        platform_set_drvdata(pdev, p);

        return 0;

err2:   drc_pmem_unbind(p);
err:    kfree(p);
        return rc;
}
static int papr_scm_remove(struct platform_device *pdev)
{
        struct papr_scm_priv *p = platform_get_drvdata(pdev);

        nvdimm_bus_unregister(p->bus);
        drc_pmem_unbind(p);
        kfree(p);

        return 0;
}
static const struct of_device_id papr_scm_match[] = {
        { .compatible = "ibm,pmemory" },
        { },
};

static struct platform_driver papr_scm_driver = {
        .probe = papr_scm_probe,
        .remove = papr_scm_remove,
        .driver = {
                .name = "papr_scm",
                .owner = THIS_MODULE,
                .of_match_table = papr_scm_match,
        },
};

module_platform_driver(papr_scm_driver);

MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");