nfit.c

/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (clear_err->status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	return 0;
}
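
/*
 * Editor's note (illustrative, assuming the NFIT_ARS_STATUS_* constants in
 * nfit.h place extended status in the upper 16 bits, as the masking above
 * implies): every ARS payload carries a 32-bit 'status' whose low word is
 * the command completion code and whose high word is command-specific
 * extended status.  For example, a raw ND_CMD_ARS_STATUS status of
 * 0x00010000 would decode as "command succeeded" (low word 0) with
 * extended status 1, which xlat_status() maps to -EBUSY.
 */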
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	const struct nd_cmd_desc *desc = NULL;
	union acpi_object in_obj, in_buf, *out_obj;
	struct device *dev = acpi_desc->dev;
	const char *cmd_name, *dimm_name;
	unsigned long dsm_mask;
	acpi_handle handle;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_DIMM);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		dsm_mask = nd_desc->dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
				dimm_name, cmd_name, in_buf.buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, in_buf.buffer.pointer, min_t(u32, 128,
					in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else
		rc = 0;

 out:
	ACPI_FREE(out_obj);

	return rc;
}
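
/*
 * Editor's sketch of the single-buffer envelope acpi_nfit_ctl() assumes
 * (derived from the code above, not from separate documentation):
 *
 *   buf: [ input fields (in_buf.buffer.length bytes)     |
 *          output fields, copied back field-by-field     |
 *          optionally-unfilled tail, reported via rc > 0 ]
 *
 * The same 'buf' carries the _DSM input and receives the output, so the
 * copy-back starts at offset in_buf.buffer.length, and a positive return
 * value is the number of trailing bytes the firmware left unfilled.
 */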
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
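
/*
 * Editor's usage sketch (hypothetical; the real caller lives later in the
 * full file, beyond this excerpt): walk the NFIT sub-tables until
 * add_table() returns NULL (end of table, or a zero-length header) or an
 * ERR_PTR on allocation failure:
 *
 *	void *p = data;
 *	while (!IS_ERR_OR_NULL(p))
 *		p = add_table(acpi_desc, &prev, p, end);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 */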
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}

static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->revision_id);
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->code);
}
static DEVICE_ATTR_RO(format);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "%#x\n", dcr->serial_number);
}
static DEVICE_ATTR_RO(serial);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_format.attr,
	&dev_attr_serial.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_flags.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (to_nfit_dcr(dev))
		return a->mode;
	else
		return 0;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
	int i;

	nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct nvdimm *nvdimm;
		unsigned long flags = 0;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, &nfit_mem->dsm_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
			nvdimm_name(nvdimm),
			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail" : "",
			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
			mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}

static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}
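
/*
 * Editor's worked example (illustrative): nfit_set_info ends in a
 * zero-length array, so sizeof(struct nfit_set_info) contributes 0 bytes,
 * and each nfit_set_info_map is u64 + u32 + u32 = 16 bytes.  With
 * num_mappings == 2 the allocation is therefore 32 bytes on typical
 * builds.
 */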
static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
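
/*
 * Editor's design note: sorting the mappings by region_offset before
 * running nd_fletcher64() makes the interleave-set cookie independent of
 * the order in which MEMDEV entries happen to appear in the NFIT, so the
 * same set of DIMMs always yields the same cookie across boots.
 */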
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
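
/*
 * Editor's worked example (illustrative numbers, not from any real NFIT):
 * with line_size = 256, num_lines = 2, table_size = 1024 and an idt with
 * line_offset[] = {0, 2}, a request for offset 600 splits into
 * line_no = 2, sub_line_offset = 88; then line_index = 0 and
 * table_skip_count = 1, so the translated offset is
 * base_offset + 0 * 256 + 1 * 1024 + 88.
 */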
static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;
	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
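
/*
 * Editor's sketch of the block command word layout implied by the enum
 * above (derived from the shifts/masks, not from a spec excerpt):
 *
 *   bits 63..56: command (bit 56 set for a write)
 *   bits 55..48: transfer length in cache lines
 *   bits 47..0:  DPA in cache-line units
 *
 * e.g. a 256-byte write at dpa 0x1000 with 64-byte cache lines encodes as
 * cmd = (1ULL << 56) | (4ULL << 48) | 0x40.
 */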
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}
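
/*
 * Editor's note: transfers are chunked to mmio->size because one
 * block-data-window aperture can only expose that many bytes at a time,
 * and each lane addresses a private aperture slot (lane * mmio->size in
 * acpi_nfit_blk_single_io() above), so concurrent callers on different
 * lanes need not serialize on a single window.
 */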
  1085. static void nfit_spa_mapping_release(struct kref *kref)
  1086. {
  1087. struct nfit_spa_mapping *spa_map = to_spa_map(kref);
  1088. struct acpi_nfit_system_address *spa = spa_map->spa;
  1089. struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;
  1090. WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
  1091. dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
  1092. if (spa_map->type == SPA_MAP_APERTURE)
  1093. memunmap((void __force *)spa_map->addr.aperture);
  1094. else
  1095. iounmap(spa_map->addr.base);
  1096. release_mem_region(spa->address, spa->length);
  1097. list_del(&spa_map->list);
  1098. kfree(spa_map);
  1099. }
  1100. static struct nfit_spa_mapping *find_spa_mapping(
  1101. struct acpi_nfit_desc *acpi_desc,
  1102. struct acpi_nfit_system_address *spa)
  1103. {
  1104. struct nfit_spa_mapping *spa_map;
  1105. WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
  1106. list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
  1107. if (spa_map->spa == spa)
  1108. return spa_map;
  1109. return NULL;
  1110. }
  1111. static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
  1112. struct acpi_nfit_system_address *spa)
  1113. {
  1114. struct nfit_spa_mapping *spa_map;
  1115. mutex_lock(&acpi_desc->spa_map_mutex);
  1116. spa_map = find_spa_mapping(acpi_desc, spa);
  1117. if (spa_map)
  1118. kref_put(&spa_map->kref, nfit_spa_mapping_release);
  1119. mutex_unlock(&acpi_desc->spa_map_mutex);
  1120. }
  1121. static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  1122. struct acpi_nfit_system_address *spa, enum spa_map_type type)
  1123. {
  1124. resource_size_t start = spa->address;
  1125. resource_size_t n = spa->length;
  1126. struct nfit_spa_mapping *spa_map;
  1127. struct resource *res;
  1128. WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
  1129. spa_map = find_spa_mapping(acpi_desc, spa);
  1130. if (spa_map) {
  1131. kref_get(&spa_map->kref);
  1132. return spa_map->addr.base;
  1133. }
  1134. spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
  1135. if (!spa_map)
  1136. return NULL;
  1137. INIT_LIST_HEAD(&spa_map->list);
  1138. spa_map->spa = spa;
  1139. kref_init(&spa_map->kref);
  1140. spa_map->acpi_desc = acpi_desc;
  1141. res = request_mem_region(start, n, dev_name(acpi_desc->dev));
  1142. if (!res)
  1143. goto err_mem;
  1144. spa_map->type = type;
  1145. if (type == SPA_MAP_APERTURE)
  1146. spa_map->addr.aperture = (void __pmem *)memremap(start, n,
  1147. ARCH_MEMREMAP_PMEM);
  1148. else
  1149. spa_map->addr.base = ioremap_nocache(start, n);
  1150. if (!spa_map->addr.base)
  1151. goto err_map;
  1152. list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
  1153. return spa_map->addr.base;
  1154. err_map:
  1155. release_mem_region(start, n);
  1156. err_mem:
  1157. kfree(spa_map);
  1158. return NULL;
  1159. }
  1160. /**
  1161. * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
  1162. * @nvdimm_bus: NFIT-bus that provided the spa table entry
  1163. * @nfit_spa: spa table to map
  1164. * @type: aperture or control region
  1165. *
  1166. * In the case where block-data-window apertures and
  1167. * dimm-control-regions are interleaved they will end up sharing a
  1168. * single request_mem_region() + ioremap() for the address range. In
  1169. * the style of devm nfit_spa_map() mappings are automatically dropped
  1170. * when all region devices referencing the same mapping are disabled /
  1171. * unbound.
  1172. */
  1173. static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  1174. struct acpi_nfit_system_address *spa, enum spa_map_type type)
  1175. {
  1176. void __iomem *iomem;
  1177. mutex_lock(&acpi_desc->spa_map_mutex);
  1178. iomem = __nfit_spa_map(acpi_desc, spa, type);
  1179. mutex_unlock(&acpi_desc->spa_map_mutex);
  1180. return iomem;
  1181. }
  1182. static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
  1183. struct acpi_nfit_interleave *idt, u16 interleave_ways)
  1184. {
  1185. if (idt) {
  1186. mmio->num_lines = idt->line_count;
  1187. mmio->line_size = idt->line_size;
  1188. if (interleave_ways == 0)
  1189. return -ENXIO;
  1190. mmio->table_size = mmio->num_lines * interleave_ways
  1191. * mmio->line_size;
  1192. }
  1193. return 0;
  1194. }
  1195. static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
  1196. struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
  1197. {
  1198. struct nd_cmd_dimm_flags flags;
  1199. int rc;
  1200. memset(&flags, 0, sizeof(flags));
  1201. rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
  1202. sizeof(flags), NULL);
  1203. if (rc >= 0 && flags.status == 0)
  1204. nfit_blk->dimm_flags = flags.flags;
  1205. else if (rc == -ENOTTY) {
  1206. /* fall back to a conservative default */
  1207. nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
  1208. rc = 0;
  1209. } else
  1210. rc = -ENXIO;
  1211. return rc;
  1212. }
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}
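/*
 * Disable a BLK region: drop the BDW / DCR SPA mappings. Safe to call
 * for a region that was never successfully enabled.
 */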
static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */

	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}
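/*
 * Address Range Scrub (ARS) helpers: each issues a single bus-scoped
 * command through nd_desc->ndctl() and returns a negative errno on a
 * transport failure, otherwise the translated firmware command status
 * (cmd_rc).
 */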
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
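/*
 * Translate each error record in the most recent ARS status payload
 * into a poison list entry on the nvdimm bus.
 */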
static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status)
{
	int rc;
	u32 i;

	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}

	return 0;
}

static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}
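/*
 * Publish a pmem range in the iomem resource tree, unless firmware has
 * already described it there. acpi_nfit_remove_resource() is registered
 * as a devm action so the entry is dropped when the device goes away.
 */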
static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
	if (ret) {
		remove_resource(res);
		return ret;
	}

	return 0;
}
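/*
 * Fill one nd_mapping for a memdev that belongs to the given SPA range.
 * PMEM / volatile ranges map directly; a DCR range additionally wires up
 * the BLK region descriptor and creates the BLK region here, since each
 * DIMM's control region stands alone.
 */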
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
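/*
 * Register an nd_region for one SPA range: collect every memdev that
 * references the range into the mapping array, derive the interleave
 * set, and create a pmem, volatile, or (via the DCR case above) BLK
 * region as appropriate.
 */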
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
				spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
					"failed to insert pmem resource to iomem: %d\n",
					rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}
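/*
 * (Re)size the buffer used to retrieve ARS status payloads. The
 * existing allocation is reused when it is already large enough.
 */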
static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}
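/*
 * Query the poison state of a SPA range: discover the ARS capabilities
 * on first use, verify the supported scrub types match the range, then
 * fetch the current status and feed any records to the poison list.
 */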
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc->nvdimm_bus,
				acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}
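/*
 * Phase 2 of the scrub: kick off a directed ARS for a range that was
 * not covered by the platform's initial scrub, polling (with init_mutex
 * dropped) until the scan completes, is cancelled, or times out.
 */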
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (nfit_spa->ars_done || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}
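/*
 * Workqueue entry point for region registration and scrubbing; see the
 * two-phase description in the comment at the top of the function body.
 */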
static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases. The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned. In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			break;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/*
		 * We got some final results, record completed ranges.
		 * Note, ars_len is a length in both branches, matching
		 * the containment check below.
		 */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (nfit_spa->nd_region)
			nfit_spa->ars_done = 1;
		else
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	mutex_unlock(&acpi_desc->init_mutex);
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
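/*
 * Parse an NFIT (or _FIT) payload and bring the described devices up.
 * Previously seen tables are moved to 'prev' lists first; add_table()
 * then reclaims entries that still exist, so anything left on a prev
 * list afterwards represents an unsupported deletion. Called from
 * acpi_nfit_add() at bind time and acpi_nfit_notify() on _FIT updates.
 */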
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
			acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
			acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
			acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
			acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
			acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
			acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}
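/*
 * Flush outstanding probe work by queueing a no-op work item behind it
 * on the single-threaded nfit workqueue and waiting for it to run.
 */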
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	return wait_for_completion_interruptible(&flush.cmp);
}

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race. It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;
	return 0;
}
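/*
 * Initialize the descriptor that ties this ACPI device to the libnvdimm
 * core: bus callbacks, table lists, locks, and the scrub work item.
 */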
void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);
	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENOMEM;

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj;
		/*
		 * Adjust for the acpi_object header of the _FIT
		 */
		obj = buf.pointer;
		if (obj->type == ACPI_TYPE_BUFFER) {
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
					__func__, (int) obj->type);
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	acpi_desc->cancel = 1;
	flush_workqueue(nfit_wq);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}
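/*
 * Handle an NFIT update notification: re-evaluate _FIT and merge any
 * new tables. The descriptor is created on demand when a notification
 * arrives before acpi_nfit_add() has seen an NFIT.
 */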
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			goto out_unlock;
		acpi_nfit_desc_init(acpi_desc, &adev->dev);
		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			goto out_unlock;
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		}
	} else {
		/* Bad _FIT, restore old nfit */
		dev_err(dev, "Invalid _FIT\n");
	}
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");