device.c

/*
 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax-private.h"
#include "dax.h"

static struct class *dax_class;

/*
 * Rely on the fact that drvdata is set before the attributes are
 * registered, and that the attributes are unregistered before drvdata
 * is cleared to assume that drvdata is always valid.
 */
static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dax_region->id);
}
static DEVICE_ATTR_RO(id);

static ssize_t region_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%llu\n", (unsigned long long)
			resource_size(&dax_region->res));
}
static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
		region_size_show, NULL);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_region *dax_region = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", dax_region->align);
}
static DEVICE_ATTR_RO(align);

static struct attribute *dax_region_attributes[] = {
	&dev_attr_region_size.attr,
	&dev_attr_align.attr,
	&dev_attr_id.attr,
	NULL,
};

static const struct attribute_group dax_region_attribute_group = {
	.name = "dax_region",
	.attrs = dax_region_attributes,
};

static const struct attribute_group *dax_region_attribute_groups[] = {
	&dax_region_attribute_group,
	NULL,
};

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);
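
/*
 * Devres teardown action: drop the region's sysfs attribute groups
 * and the kref reference taken in alloc_dax_region() when the parent
 * device is unbound.
 */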
static void dax_region_unregister(void *region)
{
	struct dax_region *dax_region = region;

	sysfs_remove_groups(&dax_region->dev->kobj,
			dax_region_attribute_groups);
	dax_region_put(dax_region);
}
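
/*
 * alloc_dax_region - allocate and register a dax_region
 * @parent: parent device that owns the region (its drvdata must be unused)
 * @region_id: id used for the "dax_region" sysfs group and device naming
 * @res: physical address range backing the region
 * @region alignment: @align is both the required alignment of @res and the
 *	fault granularity for mappings of child devices
 * @addr: linear address corresponding to @res, if mapped by the caller
 * @pfn_flags: PFN_DEV / PFN_MAP capabilities of the range
 *
 * Returns the region on success, or NULL on alignment or allocation
 * failure. The region is unregistered automatically via devres when
 * @parent is unbound.
 */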
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	/*
	 * The DAX core assumes that it can store its private data in
	 * parent->driver_data. This WARN is a reminder / safeguard for
	 * developers of device-dax drivers.
	 */
	if (dev_get_drvdata(parent)) {
		dev_WARN(parent, "dax core failed to setup private data\n");
		return NULL;
	}

	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;

	dev_set_drvdata(parent, dax_region);
	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;
	if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
		kfree(dax_region);
		return NULL;
	}

	kref_get(&dax_region->kref);
	if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
		return NULL;
	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dev_dax *to_dev_dax(struct device *dev)
{
	return container_of(dev, struct dev_dax, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dev_dax->num_resources; i++)
		size += resource_size(&dev_dax->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dev_dax_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dev_dax_attribute_group = {
	.attrs = dev_dax_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dev_dax_attribute_group,
	NULL,
};
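
/*
 * Validate a vma against device-dax constraints: the device must
 * still be alive, the mapping must be shared (MAP_SHARED), the vma
 * start and end must be aligned to the region's fault granularity,
 * ranges without struct pages (PFN_DEV without PFN_MAP) must be
 * marked VM_DONTCOPY so children cannot inherit the mapping across
 * fork, and the vma must be backed by a DAX inode.
 */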
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
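/*
 * Translate a linear page offset into the device's (possibly
 * discontiguous) set of physical resource ranges: walk the ranges,
 * consuming @pgoff until it lands inside one of them, then confirm
 * that the requested @size does not run past the end of that range.
 */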
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	phys_addr_t phys;
	int i;

	for (i = 0; i < dev_dax->num_resources; i++) {
		res = &dev_dax->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dev_dax->num_resources) {
		res = &dev_dax->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}
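
/*
 * PTE fault handler: after validating the vma, require that the
 * region alignment is exactly PAGE_SIZE, translate the faulting page
 * offset to a physical address, and install a single-page mapping
 * with vm_insert_mixed().
 */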
static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
			__func__, dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}
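
/*
 * PMD fault handler: same checks as the PTE path, plus huge-page
 * mappings require a range with struct pages (PFN_DEV|PFN_MAP) and a
 * region alignment of exactly PMD_SIZE (a smaller alignment falls
 * back to PTE faults), and the PMD-sized span must be fully
 * contained within the vma.
 */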
static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
			__func__, dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "%s: alignment (%#x) > fault size (%#x)\n",
			__func__, dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
			vmf->flags & FAULT_FLAG_WRITE);
}
#else
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
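
/*
 * Common fault entry point: dispatch to the PTE, PMD, or PUD handler
 * based on the requested mapping size, holding dax_read_lock() so
 * the device cannot be torn down in the middle of the fault.
 */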
static int dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	int rc, id;
	struct file *filp = vmf->vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s: %s (%#lx - %#lx) size = %d\n", __func__,
			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
			? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		rc = __dev_dax_pte_fault(dev_dax, vmf);
		break;
	case PE_SIZE_PMD:
		rc = __dev_dax_pmd_fault(dev_dax, vmf);
		break;
	case PE_SIZE_PUD:
		rc = __dev_dax_pud_fault(dev_dax, vmf);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}
	dax_read_unlock(id);

	return rc;
}

static int dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
};
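
/*
 * mmap handler: reject invalid vmas up front, then mark the vma
 * VM_MIXEDMAP (entries may be raw pfns without struct pages) and
 * VM_HUGEPAGE (hint that huge page faults are expected to succeed).
 */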
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "%s\n", __func__);

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
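/*
 * Over-ask the core VM for @len plus one alignment unit, then shift
 * the returned address up so that the virtual address and the file
 * offset are congruent modulo the region alignment, which is what
 * allows the PMD/PUD fault handlers to install huge mappings.
 */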
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
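
/*
 * open handler: redirect the character device inode's i_mapping to
 * the shared inode allocated by the dax core, so that
 * unmap_mapping_range() in kill_dev_dax() can find every mapping
 * regardless of which inode instance it was established through.
 */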
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
};

static void dev_dax_release(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_region *dax_region = dev_dax->region;
	struct dax_device *dax_dev = dev_dax->dax_dev;

	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
	dax_region_put(dax_region);
	put_dax(dax_dev);
	kfree(dev_dax);
}

static void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);
}
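
/*
 * Devres teardown action: mark the dax_device dead so in-flight and
 * future faults fail, zap any established mappings, then remove the
 * cdev and drop the device reference.
 */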
static void unregister_dev_dax(void *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);
	struct cdev *cdev = inode->i_cdev;

	dev_dbg(dev, "%s\n", __func__);

	kill_dev_dax(dev_dax);
	cdev_device_del(cdev, dev);
	put_device(dev);
}
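
/*
 * devm_create_dev_dax - allocate and register a dev_dax instance
 * @dax_region: parent region; its reference count is bumped for the
 *	lifetime of the device
 * @id: device id, or a negative value to allocate one from the
 *	region's ida
 * @res: array of physical address ranges, each aligned to the
 *	region's alignment
 * @count: number of entries in @res
 *
 * Returns the new device (named dax<region-id>.<id>) on success or
 * an ERR_PTR(). Teardown is automatic via devres when the region's
 * parent device is unbound.
 */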
struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
		int id, struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_device *dax_dev;
	struct dev_dax *dev_dax;
	struct inode *inode;
	struct device *dev;
	struct cdev *cdev;
	int rc, i;

	if (!count)
		return ERR_PTR(-EINVAL);

	dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
	if (!dev_dax)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dev_dax->res[i].start = res[i].start;
		dev_dax->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	if (id < 0) {
		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
		dev_dax->id = id;
		if (id < 0) {
			rc = id;
			goto err_id;
		}
	} else {
		/* region provider owns @id lifetime */
		dev_dax->id = -1;
	}

	/*
	 * No 'host' or dax_operations since there is no access to this
	 * device outside of mmap of the resulting character device.
	 */
	dax_dev = alloc_dax(dev_dax, NULL, NULL);
	if (!dax_dev) {
		rc = -ENOMEM;
		goto err_dax;
	}

	/* from here on we're committed to teardown via dev_dax_release() */
	dev = &dev_dax->dev;
	device_initialize(dev);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;

	dev_dax->num_resources = count;
	dev_dax->dax_dev = dax_dev;
	dev_dax->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = inode->i_rdev;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dev_dax_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, id);

	rc = cdev_device_add(cdev, dev);
	if (rc) {
		kill_dev_dax(dev_dax);
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
	if (rc)
		return ERR_PTR(rc);

	return dev_dax;

 err_dax:
	if (dev_dax->id >= 0)
		ida_simple_remove(&dax_region->ida, dev_dax->id);
 err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dev_dax);

static int __init dax_init(void)
{
	dax_class = class_create(THIS_MODULE, "dax");
	return PTR_ERR_OR_ZERO(dax_class);
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);