nfit.c

/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/pmem.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set\n");

static struct workqueue_struct *nfit_wq;
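
/*
 * Staging lists for tables from a previous NFIT parse: the add_*
 * helpers below move a byte-identical table back to the live
 * acpi_nfit_desc lists instead of allocating a new entry, so a
 * re-enumeration keeps existing table pointers stable.
 */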
struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static u8 nfit_uuid[NFIT_UUID_MAX][16];

const u8 *to_nfit_uuid(enum nfit_uuids id)
{
	return nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}
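
/*
 * Translate the firmware status returned in an ARS / clear-error
 * command payload into an errno for the caller: the low 16 bits carry
 * the command status and, for ARS status, the value as a whole encodes
 * the extended state (done, busy, no ARS performed, interrupted).
 */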
static int xlat_status(void *buf, unsigned int cmd)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	struct nd_cmd_ars_start *ars_start;
	struct nd_cmd_ars_cap *ars_cap;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		ars_cap = buf;
		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (ars_cap->status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((ars_cap->status >> 16 & flags) == 0)
			return -ENOTTY;
		break;
	case ND_CMD_ARS_START:
		ars_start = buf;
		/* ARS is in progress */
		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (ars_start->status & 0xffff)
			return -EIO;
		break;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (ars_status->status & 0xffff)
			return -EIO;

		/* Check extended status (Upper two bytes) */
		if (ars_status->status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (ars_status->status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (ars_status->status >> 16)
			return -EIO;
		break;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (clear_err->status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		break;
	default:
		break;
	}

	return 0;
}
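
/*
 * Generic _DSM dispatch for bus-scoped and dimm-scoped commands: the
 * caller's buffer is wrapped in an ACPI package, the _DSM under the
 * command-family UUID is evaluated, and the returned buffer is copied
 * back into the caller's buffer one output field at a time.
 */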
static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	acpi_handle handle;
	unsigned int func;
	const u8 *uuid;
	u32 offset;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		uuid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		desc = nd_cmd_bus_desc(cmd);
		uuid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
				__func__, dimm_name, cmd, func,
				in_buf.buffer.length);
		print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
				in_buf.buffer.pointer,
				min_t(u32, 256, in_buf.buffer.length), true);
	}

	out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
		dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
				dimm_name, cmd_name, out_obj->buffer.length);
		print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
				4, out_obj->buffer.pointer, min_t(u32, 128,
					out_obj->buffer.length), true);
	}

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(buf, cmd);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(buf, cmd);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",
	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

static int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
			return i;
	return -1;
}
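
/*
 * Each add_* helper below first looks for a byte-identical table on
 * the 'prev' staging list and moves it back to the live list if found;
 * otherwise a new devm-allocated entry is created for the table.
 */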
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, length) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	nfit_spa->spa = spa;
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	nfit_memdev->memdev = memdev;
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index);
	return true;
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	nfit_dcr->dcr = dcr;
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	nfit_bdw->bdw = bdw;
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	list_for_each_entry(nfit_idt, &prev->idts, list)
		if (memcmp(nfit_idt->idt, idt, length) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	nfit_idt->idt = idt;
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	list_for_each_entry(nfit_flush, &prev->flushes, list)
		if (memcmp(nfit_flush->flush, flush, length) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	nfit_flush->flush = flush;
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}
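
/*
 * Dispatch a single NFIT sub-table by type and return a pointer just
 * past it, NULL at the end of the table, or ERR_PTR(-ENOMEM) if an
 * allocation in one of the add_* helpers failed.
 */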
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
				hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}
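
/*
 * Locate the SPA-BDW range for a dimm: find the block-data-window SPA
 * whose memdev entry matches this dimm's device handle and control
 * region index, clearing nfit_mem->bdw if none is found.
 */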
static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_flush *nfit_flush;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			if (nfit_flush->flush->device_handle !=
					nfit_memdev->memdev->device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			break;
		}
		break;
	}
}
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = nfit_spa_type(spa);

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		return 0;
	}

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		}
	}

	return 0;
}
static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		int rc;

		rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}
static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	NULL,
};

static struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};
static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);
static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		/* rc starts at -ENXIO; only stop once a second code was found */
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);
static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev))
		return 0;
	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};
static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}
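
/*
 * Bind a dimm to its ACPI companion device and probe which _DSM
 * command family it implements (Intel, HPE1 or HPE2), then record the
 * per-family function mask in nfit_mem->dsm_mask.
 */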
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const u8 *uuid;
	int i;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	/*
	 * Until standardization materializes we need to consider up to 3
	 * different command sets.  Note, that checking for zero functions
	 * tells us if any commands might be reachable through this uuid.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0))
			break;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = i;
	if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1)
		dsm_mask = 0x1c3c76;
	else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else {
		dev_err(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		return force_enable_dimms ? 0 : -ENODEV;
	}

	uuid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		unsigned long flags = 0, cmd_mask;
		struct nvdimm *nvdimm;
		u32 device_handle;
		u16 mem_flags;
		int rc;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			flags |= NDD_ALIASING;

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			flags |= NDD_UNARMED;

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
	}

	return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
}
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	NULL,
};

static struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};
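
/*
 * The interleave-set cookie is a fletcher64 checksum over the
 * (region_offset, serial_number) tuple of every mapping in the set,
 * sorted by region_offset, so the same collection of dimms yields the
 * same cookie regardless of enumeration order.
 */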
/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	int i, spa_type = nfit_spa_type(spa);
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info *info;

	if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
		/* pass */;
	else
		return 0;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = nfit_mem->dcr->serial_number;
	}

	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);

	return 0;
}
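
/*
 * Translate a linear offset within a dimm's BLK control or aperture
 * window into the interleaved system-physical offset: split the offset
 * into an interleave line number and a remainder, look up that line's
 * offset in the interleave table, and account for the skipped tables.
 */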
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static void wmb_blk(struct nfit_blk *nfit_blk)
{
	if (nfit_blk->nvdimm_flush) {
		/*
		 * The first wmb() is needed to 'sfence' all previous writes
		 * such that they are architecturally visible for the platform
		 * buffer flush.  Note that we've already arranged for pmem
		 * writes to avoid the cache via arch_memcpy_to_pmem().  The
		 * final wmb() ensures ordering for the NVDIMM flush write.
		 */
		wmb();
		writeq(1, nfit_blk->nvdimm_flush);
		wmb();
	} else
		wmb_pmem();
}
static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset);
}

static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	wmb_blk(nfit_blk);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}
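
/*
 * One BLK transfer: program the block control window with the dpa,
 * length and direction, copy the data through the block data window
 * (a line at a time when interleaved), then consult the status
 * register to report success or -EIO.
 */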
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_to_pmem(mmio->addr.aperture + offset,
					iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				mmio_flush_range((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy_from_pmem(iobuf + copied,
					mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		wmb_blk(nfit_blk);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}
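
/*
 * Top-level BLK read/write entry point: acquire a lane, split the request
 * into aperture-sized chunks, and hand each chunk to acpi_nfit_blk_single_io().
 */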
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static void nfit_spa_mapping_release(struct kref *kref)
{
	struct nfit_spa_mapping *spa_map = to_spa_map(kref);
	struct acpi_nfit_system_address *spa = spa_map->spa;
	struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
	if (spa_map->type == SPA_MAP_APERTURE)
		memunmap((void __force *)spa_map->addr.aperture);
	else
		iounmap(spa_map->addr.base);
	release_mem_region(spa->address, spa->length);
	list_del(&spa_map->list);
	kfree(spa_map);
}

static struct nfit_spa_mapping *find_spa_mapping(
		struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
	list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
		if (spa_map->spa == spa)
			return spa_map;

	return NULL;
}

static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_spa_mapping *spa_map;

	mutex_lock(&acpi_desc->spa_map_mutex);
	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map)
		kref_put(&spa_map->kref, nfit_spa_mapping_release);
	mutex_unlock(&acpi_desc->spa_map_mutex);
}

static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	resource_size_t start = spa->address;
	resource_size_t n = spa->length;
	struct nfit_spa_mapping *spa_map;
	struct resource *res;

	WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));

	spa_map = find_spa_mapping(acpi_desc, spa);
	if (spa_map) {
		kref_get(&spa_map->kref);
		return spa_map->addr.base;
	}

	spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
	if (!spa_map)
		return NULL;

	INIT_LIST_HEAD(&spa_map->list);
	spa_map->spa = spa;
	kref_init(&spa_map->kref);
	spa_map->acpi_desc = acpi_desc;

	res = request_mem_region(start, n, dev_name(acpi_desc->dev));
	if (!res)
		goto err_mem;

	spa_map->type = type;
	if (type == SPA_MAP_APERTURE)
		spa_map->addr.aperture = (void __pmem *)memremap(start, n,
							ARCH_MEMREMAP_PMEM);
	else
		spa_map->addr.base = ioremap_nocache(start, n);

	if (!spa_map->addr.base)
		goto err_map;

	list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
	return spa_map->addr.base;

 err_map:
	release_mem_region(start, n);
 err_mem:
	kfree(spa_map);
	return NULL;
}

/**
 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
 * @acpi_desc: NFIT-bus descriptor that provided the spa table entry
 * @spa: spa table entry to map
 * @type: aperture or control region
 *
 * In the case where block-data-window apertures and
 * dimm-control-regions are interleaved they will end up sharing a
 * single request_mem_region() + ioremap() for the address range. In
 * the style of devm nfit_spa_map() mappings are automatically dropped
 * when all region devices referencing the same mapping are disabled /
 * unbound.
 */
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
	void __iomem *iomem;

	mutex_lock(&acpi_desc->spa_map_mutex);
	iomem = __nfit_spa_map(acpi_desc, spa, type);
	mutex_unlock(&acpi_desc->spa_map_mutex);

	return iomem;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}
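
/*
 * Query the DIMM for its BLK flags via ND_CMD_DIMM_FLAGS; if the DIMM does
 * not implement the command, fall back to the conservative latch + read-flush
 * defaults.
 */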
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}
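
/*
 * Enable a BLK region: map the block-data-window aperture and the
 * dimm-control-region, set up any interleave tables, fetch the DIMM flags,
 * and map the flush hint used by wmb_blk().
 */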
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_flush *nfit_flush;
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
			SPA_MAP_APERTURE);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
			SPA_MAP_CONTROL);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	nfit_flush = nfit_mem->nfit_flush;
	if (nfit_flush && nfit_flush->flush->hint_count != 0) {
		nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
				nfit_flush->flush->hint_address[0], 8);
		if (!nfit_blk->nvdimm_flush)
			return -ENOMEM;
	}

	if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	int i;

	if (!nfit_blk)
		return; /* never enabled */
	/* auto-free BLK spa mappings */
	for (i = 0; i < 2; i++) {
		struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];

		if (mmio->addr.base)
			nfit_spa_unmap(acpi_desc, mmio->spa);
	}
	nd_blk_region_set_provider_data(ndbr, NULL);
	/* devm will free nfit_blk */
}
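
/*
 * ARS (Address Range Scrub) helpers: query scrub capabilities, start a scrub,
 * resume a scrub whose results overflowed the status buffer, and retrieve
 * scrub status for a SPA range.
 */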
static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
		struct nd_cmd_ars_status *ars_status)
{
	int rc;
	u32 i;

	for (i = 0; i < ars_status->num_records; i++) {
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}

	return 0;
}

static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}
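
/*
 * Register a "Persistent Memory" resource in the iomem tree for a PMEM range
 * that firmware did not already describe, and arrange for it to be removed
 * automatically when the device goes away.
 */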
static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
	if (ret) {
		remove_resource(res);
		return ret;
	}

	return 0;
}
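
/*
 * Build the nd_mapping for one memory-device entry: PM/volatile ranges map
 * directly, while DCR entries stand up a BLK region backed by the DIMM's
 * block-data-window.
 */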
static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	nd_mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		nd_mapping->start = memdev->address;
		nd_mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			nd_mapping->size = nfit_mem->bdw->capacity;
			nd_mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->nd_mapping = nd_mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->disable = acpi_nfit_blk_region_disable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}
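
/*
 * Register an nd_region for a SPA range: collect all memory-device mappings
 * that reference the range, compute the interleave set, and create a PMEM,
 * volatile, or BLK region as appropriate.
 */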
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&nd_mappings, 0, sizeof(nd_mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping *nd_mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		nd_mapping = &nd_mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->nd_mapping = nd_mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}

static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}
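
/*
 * Fetch the latest scrub results for a SPA range (querying ARS capabilities
 * on first use) and feed any poison records to the nvdimm core.
 */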
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc->nvdimm_bus,
				acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}
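
/*
 * Phase-2 scrub: kick off a directed ARS for a registered region that was not
 * covered by the initial platform scrub, and notify the region when new
 * poison records arrive.
 */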
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (nfit_spa->ars_done || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);

		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}

static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases. The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned. In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			break;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = ars_start + init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (nfit_spa->nd_region)
			nfit_spa->ars_done = 1;
		else
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}
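
/*
 * Parse the NFIT: move previously seen tables to a 'prev' list, walk the new
 * table data, verify nothing was deleted, then (re)register DIMMs and regions.
 */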
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	u8 *data;
	int rc;

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	data = (u8 *) acpi_desc->nfit;
	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	if (nfit_mem_init(acpi_desc) != 0) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	acpi_nfit_init_dsms(acpi_desc);

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}
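
/*
 * Ensure any in-flight NFIT probing / scrubbing has reached a consistent
 * point before allowing new administrative commands: bounce the device lock
 * and wait (interruptibly) for the workqueue to drain past a marker work item.
 */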
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	return wait_for_completion_interruptible(&flush.cmp);
}

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race. It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}

void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc;

	status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;

	acpi_nfit_desc_init(acpi_desc, &adev->dev);
	acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENOMEM;

	/*
	 * Save the acpi header for later and then skip it,
	 * making nfit point to the first nfit table header.
	 */
	acpi_desc->acpi_header = *tbl;
	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
	sz -= sizeof(struct acpi_table_nfit);

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj;
		/*
		 * Adjust for the acpi_object header of the _FIT
		 */
		obj = buf.pointer;
		if (obj->type == ACPI_TYPE_BUFFER) {
			acpi_desc->nfit =
				(struct acpi_nfit_header *)obj->buffer.pointer;
			sz = obj->buffer.length;
		} else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				__func__, (int) obj->type);
	}

	rc = acpi_nfit_init(acpi_desc, sz);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}
	return 0;
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);

	acpi_desc->cancel = 1;
	flush_workqueue(nfit_wq);
	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
	return 0;
}
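
/*
 * Hotplug notification handler: re-evaluate _FIT and merge any new tables
 * into the existing description, restoring the previous NFIT if the merge
 * fails.
 */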
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_header *nfit_saved;
	union acpi_object *obj;
	struct device *dev = &adev->dev;
	acpi_status status;
	int ret;

	dev_dbg(dev, "%s: event: %d\n", __func__, event);

	device_lock(dev);
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		goto out_unlock;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			goto out_unlock;
		acpi_nfit_desc_init(acpi_desc, &adev->dev);
		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			goto out_unlock;
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		goto out_unlock;
	}

	nfit_saved = acpi_desc->nfit;
	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		acpi_desc->nfit =
			(struct acpi_nfit_header *)obj->buffer.pointer;
		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
		if (ret) {
			/* Merge failed, restore old nfit, and exit */
			acpi_desc->nfit = nfit_saved;
			dev_err(dev, "failed to merge updated NFIT\n");
		}
	} else {
		/* Bad _FIT, restore old nfit */
		dev_err(dev, "Invalid _FIT\n");
	}
	kfree(buf.pointer);

 out_unlock:
	device_unlock(dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
	acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
	acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
	acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
	acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
	acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
	acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
	acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	return acpi_bus_register_driver(&acpi_nfit_driver);
}

static __exit void nfit_exit(void)
{
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");