/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
		goto err_request_region;

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	/* bail out before registering the release action if allocation failed */
	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
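
/*
 * Illustrative sketch (not part of the original driver): how a bus provider
 * might share one physical table across regions via the helper above.  The
 * function, address and size below are hypothetical; MEMREMAP_WB is just one
 * possible mapping type.  The bus lock is taken inside devm_nvdimm_memremap(),
 * so the caller must not hold it.
 */
static void __maybe_unused example_shared_mapping(struct device *dev,
		resource_size_t table_phys, size_t table_size)
{
	/* first caller creates the mapping, later callers just take a kref */
	void *table = devm_nvdimm_memremap(dev, table_phys, table_size,
			MEMREMAP_WB);

	if (!table)
		return;
	/* the mapping is released automatically when @dev is unbound (devm) */
}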

u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
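
/*
 * Illustrative sketch (not part of the original driver): nd_fletcher64() is a
 * Fletcher-64 style checksum over @len bytes consumed in u32 units (trailing
 * bytes are ignored).  The wrapper below is hypothetical and only shows the
 * call pattern.
 */
static u64 __maybe_unused example_checksum(void *buf, size_t len)
{
	return nd_fletcher64(buf, len, true);	/* true: buffer is little-endian */
}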

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					__func__, i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}
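
/*
 * Illustrative sketch (not part of the original driver): a typical 'uuid'
 * sysfs store method wraps nd_uuid_store() under the device lock it expects.
 * The struct, field and bus-lock placement below are hypothetical.
 */
struct example_ns {
	u8 *uuid;
};

static ssize_t __maybe_unused example_uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct example_ns *ns = dev_get_drvdata(dev);
	int rc;

	device_lock(dev);		/* nd_uuid_store() expects this held */
	nvdimm_bus_lock(dev);
	rc = nd_uuid_store(dev, &ns->uuid, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}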

ssize_t nd_sector_size_show(unsigned long current_lbasize,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_lbasize == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_sector_size_store(struct device *dev, const char *buf,
		unsigned long *current_lbasize, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_lbasize = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

void __nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	*start = jiffies;
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(__nd_iostat_start);

void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	unsigned long duration = jiffies - start;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(nd_iostat_end);
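
/*
 * Illustrative sketch (not part of the original driver): a make_request
 * driver brackets each bio with the iostat helpers so I/O shows up in the
 * disk statistics.  The function below is hypothetical.
 */
static void __maybe_unused example_account_bio(struct bio *bio)
{
	unsigned long start;

	__nd_iostat_start(bio, &start);
	/* ... perform the data transfer for @bio ... */
	nd_iostat_end(bio, start);
}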

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb: badblocks instance to populate
 * @ns_offset: namespace offset where the error range begins (in bytes)
 * @len: number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	num_sectors = div_u64_rem(len, sector_size, &rem);
	if (rem)
		num_sectors++;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}
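
/*
 * Worked example (illustrative, not part of the original driver): a poison
 * range starting 600 bytes into the namespace with a length of 1300 bytes
 * covers byte offsets [600, 1899].  start_sector = 600 / 512 = 1, and
 * 1300 / 512 = 2 remainder 276, so num_sectors is rounded up to 3: sectors
 * 1, 2 and 3 of the namespace are marked bad.
 */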

static void badblocks_populate(struct list_head *poison_list,
		struct badblocks *bb, const struct resource *res)
{
	struct nd_poison *pl;

	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < res->start)
			continue;
		if (pl->start > res->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= res->start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= res->end)
				len = pl->length;
			else
				len = res->start + resource_size(res)
					- pl->start;
			__add_badblock_range(bb, start - res->start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < res->start) {
			u64 len;

			if (pl_end < res->end)
				len = pl->start + pl->length - res->start;
			else
				len = resource_size(res);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @res: resource range to consider
 *
 * The poison list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges.  Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res)
{
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;

	if (!is_nd_pmem(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	poison_list = &nvdimm_bus->poison_list;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(poison_list, bb, res);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
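
/*
 * Illustrative sketch (not part of the original driver): a pmem-style
 * consumer populates its badblocks instance from the bus poison list for
 * the physical range backing its namespace.  The function and parameter
 * names below are hypothetical.
 */
static void __maybe_unused example_populate_bb(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *ns_res)
{
	/* @bb must already be initialized by the caller */
	nvdimm_badblocks_populate(nd_region, bb, ns_res);
}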

static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	pl->start = addr;
	pl->length = length;
	list_add_tail(&pl->list, &nvdimm_bus->poison_list);

	return 0;
}

static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	if (list_empty(&nvdimm_bus->poison_list))
		return add_poison(nvdimm_bus, addr, length);

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region
	 */
	list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
		if (pl->start == addr) {
			/* If length has changed, update this list entry */
			if (pl->length != length)
				pl->length = length;
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is consumed
	 * and converted to badblocks
	 */
	return add_poison(nvdimm_bus, addr, length);
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	int rc;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	rc = bus_add_poison(nvdimm_bus, addr, length);
	nvdimm_bus_unlock(&nvdimm_bus->dev);

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
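
/*
 * Illustrative sketch (not part of the original driver): a bus provider
 * (for example an address-range-scrub status handler) reports each
 * discovered poison range to libnvdimm.  The record layout and loop bounds
 * below are hypothetical.
 */
static int __maybe_unused example_report_poison(struct nvdimm_bus *nvdimm_bus,
		const u64 *spa, const u64 *len, int nr_records)
{
	int i, rc;

	for (i = 0; i < nr_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus, spa[i], len[i]);
		if (rc)
			return rc;	/* -ENOMEM if the list entry allocation failed */
	}
	return 0;
}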

#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif
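
/*
 * Illustrative sketch (not part of the original driver): a block driver that
 * exposes per-sector metadata registers the metadata size before adding its
 * disk.  The wrapper below is hypothetical; nd_integrity_init() is a no-op
 * when @meta_size is 0 or CONFIG_BLK_DEV_INTEGRITY is disabled.
 */
static int __maybe_unused example_setup_integrity(struct gendisk *disk,
		unsigned long meta_size)
{
	return nd_integrity_init(disk, meta_size);
}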

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;
	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nd_region_devs_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);