nfit.c

/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <asm-generic/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

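/*
 * acpi_nfit_ctl() marshals a libnvdimm bus or dimm command into the
 * corresponding ACPI _DSM input package and copies the _DSM output
 * buffer back into the caller's payload.
 */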
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}
	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa),
			GFP_KERNEL);

	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev = devm_kzalloc(dev,
			sizeof(*nfit_memdev), GFP_KERNEL);

	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr),
			GFP_KERNEL);

	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw),
			GFP_KERNEL);

	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt),
			GFP_KERNEL);

	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
			GFP_KERNEL);

	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

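/*
 * add_table() dispatches one NFIT sub-table to the add_* helper that
 * matches its type and returns a pointer to the next sub-table.
 */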
static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
		const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_dcr *nfit_dcr;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
		if (nfit_dcr->dcr->region_index != dcr)
			continue;
		nfit_mem->dcr = nfit_dcr->dcr;
		break;
	}

	if (!nfit_mem->dcr) {
		dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
				spa->range_index, __to_nfit_memdev(nfit_mem)
				? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
		return -ENODEV;
	}

	/*
	 * We've found enough to create an nvdimm, optionally
	 * find an associated BDW
	 */
	list_add(&nfit_mem->list, &acpi_desc->dimms);

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return 0;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return 0;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}

	return 0;
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);
	u16 dcr;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		int rc;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}

		if (found)
			continue;

		rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
		if (rc)
			return rc;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s). From each MEMDEV find the
	 * corresponding DCR. Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR. Throw it all into an nfit_mem object. Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups);

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
			flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
			flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
			flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
			flags & ACPI_NFIT_MEM_ARMED ? "not_armed " : "",
			flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			/*
			 * If for some reason we find multiple DCRs the
			 * first one wins
			 */
			dev_err(acpi_desc->dev, "duplicate DCR detected: %s\n",
					nvdimm_name(nvdimm));
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
				mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
				mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
				mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
				mem_flags & ACPI_NFIT_MEM_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_ARS_STATUS; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}

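/*
 * Translate a linear aperture offset into the physical offset dictated
 * by the interleave description table (line size, line count and ways).
 */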
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush. Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem(). The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & ND_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

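/*
 * Perform one BLK aperture transfer: program the block control window
 * for the target DPA, copy through the aperture while honoring
 * interleave line boundaries, then check the status register.
 */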
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & ND_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
				ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @nvdimm_bus: NFIT-bus that provided the spa table entry
 * @nfit_spa: spa table to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range. In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags));

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = ND_BLK_DCR_LATCH | ND_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct acpi_nfit_system_address *spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc))
			return -ENOMEM;
		break;
	}

	return 0;
}

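/*
 * Register an nd_region for a SPA range: build the dimm mappings for
 * every MEMDEV that references the range and create the corresponding
 * region(s) on the nvdimm bus.
 */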
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
				spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, spa);
		if (rc)
			return rc;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		return rc;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		if (!nvdimm_pmem_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		if (!nvdimm_volatile_region_create(nvdimm_bus, ndr_desc))
			return -ENOMEM;
	}

	return 0;
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc = acpi_nfit_register_region(acpi_desc, nfit_spa);

		if (rc)
			return rc;
	}

	return 0;
}

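/*
 * acpi_nfit_init() walks the NFIT sub-tables, builds the nfit_mem
 * objects, probes the bus and dimm _DSM masks, and registers the
 * resulting dimms and regions with the libnvdimm core.
 */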
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	const void *end;
	u8 *data;
	int rc;

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	data += sizeof(struct acpi_table_nfit);
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		return PTR_ERR(data);
	}

	if (nfit_mem_init(acpi_desc) != 0)
		return -ENOMEM;

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		return rc;

	return acpi_nfit_register_regions(acpi_desc);
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to find NFIT\n");
		return -ENXIO;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENXIO;

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
	},
};

static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");