nfit.c

  1. /*
  2. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. #include <linux/list_sort.h>
  14. #include <linux/libnvdimm.h>
  15. #include <linux/module.h>
  16. #include <linux/mutex.h>
  17. #include <linux/ndctl.h>
  18. #include <linux/delay.h>
  19. #include <linux/list.h>
  20. #include <linux/acpi.h>
  21. #include <linux/sort.h>
  22. #include <linux/pmem.h>
  23. #include <linux/io.h>
  24. #include <linux/nd.h>
  25. #include <asm/cacheflush.h>
  26. #include "nfit.h"
  27. /*
  28. * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
  29. * irrelevant.
  30. */
  31. #include <linux/io-64-nonatomic-hi-lo.h>
  32. static bool force_enable_dimms;
  33. module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
  34. MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
  35. static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
  36. module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
  37. MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");
  38. /* after three payloads of overflow, it's dead jim */
  39. static unsigned int scrub_overflow_abort = 3;
  40. module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
  41. MODULE_PARM_DESC(scrub_overflow_abort,
  42. "Number of times we overflow ARS results before abort");
  43. static bool disable_vendor_specific;
  44. module_param(disable_vendor_specific, bool, S_IRUGO);
  45. MODULE_PARM_DESC(disable_vendor_specific,
  46. "Limit commands to the publicly specified set\n");
  47. static struct workqueue_struct *nfit_wq;
  48. struct nfit_table_prev {
  49. struct list_head spas;
  50. struct list_head memdevs;
  51. struct list_head dcrs;
  52. struct list_head bdws;
  53. struct list_head idts;
  54. struct list_head flushes;
  55. };
  56. static u8 nfit_uuid[NFIT_UUID_MAX][16];
  57. const u8 *to_nfit_uuid(enum nfit_uuids id)
  58. {
  59. return nfit_uuid[id];
  60. }
  61. EXPORT_SYMBOL(to_nfit_uuid);
  62. static struct acpi_nfit_desc *to_acpi_nfit_desc(
  63. struct nvdimm_bus_descriptor *nd_desc)
  64. {
  65. return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
  66. }
  67. static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
  68. {
  69. struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
  70. /*
  71. * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
  72. * acpi_device.
  73. */
  74. if (!nd_desc->provider_name
  75. || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
  76. return NULL;
  77. return to_acpi_device(acpi_desc->dev);
  78. }
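/* Translate the command status reported by firmware into an errno (or, for a
 * partially completed clear-error, the number of bytes actually cleared). */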
  79. static int xlat_status(void *buf, unsigned int cmd)
  80. {
  81. struct nd_cmd_clear_error *clear_err;
  82. struct nd_cmd_ars_status *ars_status;
  83. struct nd_cmd_ars_start *ars_start;
  84. struct nd_cmd_ars_cap *ars_cap;
  85. u16 flags;
  86. switch (cmd) {
  87. case ND_CMD_ARS_CAP:
  88. ars_cap = buf;
  89. if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
  90. return -ENOTTY;
  91. /* Command failed */
  92. if (ars_cap->status & 0xffff)
  93. return -EIO;
  94. /* No supported scan types for this range */
  95. flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
  96. if ((ars_cap->status >> 16 & flags) == 0)
  97. return -ENOTTY;
  98. break;
  99. case ND_CMD_ARS_START:
  100. ars_start = buf;
  101. /* ARS is in progress */
  102. if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
  103. return -EBUSY;
  104. /* Command failed */
  105. if (ars_start->status & 0xffff)
  106. return -EIO;
  107. break;
  108. case ND_CMD_ARS_STATUS:
  109. ars_status = buf;
  110. /* Command failed */
  111. if (ars_status->status & 0xffff)
  112. return -EIO;
  113. /* Check extended status (Upper two bytes) */
  114. if (ars_status->status == NFIT_ARS_STATUS_DONE)
  115. return 0;
  116. /* ARS is in progress */
  117. if (ars_status->status == NFIT_ARS_STATUS_BUSY)
  118. return -EBUSY;
  119. /* No ARS performed for the current boot */
  120. if (ars_status->status == NFIT_ARS_STATUS_NONE)
  121. return -EAGAIN;
  122. /*
  123. * ARS interrupted, either we overflowed or some other
  124. * agent wants the scan to stop. If we didn't overflow
  125. * then just continue with the returned results.
  126. */
  127. if (ars_status->status == NFIT_ARS_STATUS_INTR) {
  128. if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
  129. return -ENOSPC;
  130. return 0;
  131. }
  132. /* Unknown status */
  133. if (ars_status->status >> 16)
  134. return -EIO;
  135. break;
  136. case ND_CMD_CLEAR_ERROR:
  137. clear_err = buf;
  138. if (clear_err->status & 0xffff)
  139. return -EIO;
  140. if (!clear_err->cleared)
  141. return -EIO;
  142. if (clear_err->length > clear_err->cleared)
  143. return clear_err->cleared;
  144. break;
  145. default:
  146. break;
  147. }
  148. return 0;
  149. }
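/* Dispatch a bus- or DIMM-scoped command to firmware via _DSM and unpack the
 * result into the caller's buffer; *cmd_rc carries the translated status. */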
  150. static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
  151. struct nvdimm *nvdimm, unsigned int cmd, void *buf,
  152. unsigned int buf_len, int *cmd_rc)
  153. {
  154. struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
  155. union acpi_object in_obj, in_buf, *out_obj;
  156. const struct nd_cmd_desc *desc = NULL;
  157. struct device *dev = acpi_desc->dev;
  158. struct nd_cmd_pkg *call_pkg = NULL;
  159. const char *cmd_name, *dimm_name;
  160. unsigned long cmd_mask, dsm_mask;
  161. acpi_handle handle;
  162. unsigned int func;
  163. const u8 *uuid;
  164. u32 offset;
  165. int rc, i;
  166. func = cmd;
  167. if (cmd == ND_CMD_CALL) {
  168. call_pkg = buf;
  169. func = call_pkg->nd_command;
  170. }
  171. if (nvdimm) {
  172. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  173. struct acpi_device *adev = nfit_mem->adev;
  174. if (!adev)
  175. return -ENOTTY;
  176. if (call_pkg && nfit_mem->family != call_pkg->nd_family)
  177. return -ENOTTY;
  178. dimm_name = nvdimm_name(nvdimm);
  179. cmd_name = nvdimm_cmd_name(cmd);
  180. cmd_mask = nvdimm_cmd_mask(nvdimm);
  181. dsm_mask = nfit_mem->dsm_mask;
  182. desc = nd_cmd_dimm_desc(cmd);
  183. uuid = to_nfit_uuid(nfit_mem->family);
  184. handle = adev->handle;
  185. } else {
  186. struct acpi_device *adev = to_acpi_dev(acpi_desc);
  187. cmd_name = nvdimm_bus_cmd_name(cmd);
  188. cmd_mask = nd_desc->cmd_mask;
  189. dsm_mask = cmd_mask;
  190. desc = nd_cmd_bus_desc(cmd);
  191. uuid = to_nfit_uuid(NFIT_DEV_BUS);
  192. handle = adev->handle;
  193. dimm_name = "bus";
  194. }
  195. if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
  196. return -ENOTTY;
  197. if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
  198. return -ENOTTY;
  199. in_obj.type = ACPI_TYPE_PACKAGE;
  200. in_obj.package.count = 1;
  201. in_obj.package.elements = &in_buf;
  202. in_buf.type = ACPI_TYPE_BUFFER;
  203. in_buf.buffer.pointer = buf;
  204. in_buf.buffer.length = 0;
  205. /* libnvdimm has already validated the input envelope */
  206. for (i = 0; i < desc->in_num; i++)
  207. in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
  208. i, buf);
  209. if (call_pkg) {
  210. /* skip over package wrapper */
  211. in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
  212. in_buf.buffer.length = call_pkg->nd_size_in;
  213. }
  214. if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
  215. dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
  216. __func__, dimm_name, cmd, func,
  217. in_buf.buffer.length);
  218. print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
  219. in_buf.buffer.pointer,
  220. min_t(u32, 256, in_buf.buffer.length), true);
  221. }
  222. out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
  223. if (!out_obj) {
  224. dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
  225. cmd_name);
  226. return -EINVAL;
  227. }
  228. if (call_pkg) {
  229. call_pkg->nd_fw_size = out_obj->buffer.length;
  230. memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
  231. out_obj->buffer.pointer,
  232. min(call_pkg->nd_fw_size, call_pkg->nd_size_out));
  233. ACPI_FREE(out_obj);
  234. /*
  235. * Need to support FW function w/o known size in advance.
  236. * Caller can determine required size based upon nd_fw_size.
  237. * If we return an error (like elsewhere) then caller wouldn't
  238. * be able to rely upon data returned to make calculation.
  239. */
  240. return 0;
  241. }
  242. if (out_obj->package.type != ACPI_TYPE_BUFFER) {
  243. dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
  244. __func__, dimm_name, cmd_name, out_obj->type);
  245. rc = -EINVAL;
  246. goto out;
  247. }
  248. if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
  249. dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__,
  250. dimm_name, cmd_name, out_obj->buffer.length);
  251. print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
  252. 4, out_obj->buffer.pointer, min_t(u32, 128,
  253. out_obj->buffer.length), true);
  254. }
  255. for (i = 0, offset = 0; i < desc->out_num; i++) {
  256. u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
  257. (u32 *) out_obj->buffer.pointer);
  258. if (offset + out_size > out_obj->buffer.length) {
  259. dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
  260. __func__, dimm_name, cmd_name, i);
  261. break;
  262. }
  263. if (in_buf.buffer.length + offset + out_size > buf_len) {
  264. dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
  265. __func__, dimm_name, cmd_name, i);
  266. rc = -ENXIO;
  267. goto out;
  268. }
  269. memcpy(buf + in_buf.buffer.length + offset,
  270. out_obj->buffer.pointer + offset, out_size);
  271. offset += out_size;
  272. }
  273. if (offset + in_buf.buffer.length < buf_len) {
  274. if (i >= 1) {
  275. /*
  276. * status valid, return the number of bytes left
  277. * unfilled in the output buffer
  278. */
  279. rc = buf_len - offset - in_buf.buffer.length;
  280. if (cmd_rc)
  281. *cmd_rc = xlat_status(buf, cmd);
  282. } else {
  283. dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
  284. __func__, dimm_name, cmd_name, buf_len,
  285. offset);
  286. rc = -ENXIO;
  287. }
  288. } else {
  289. rc = 0;
  290. if (cmd_rc)
  291. *cmd_rc = xlat_status(buf, cmd);
  292. }
  293. out:
  294. ACPI_FREE(out_obj);
  295. return rc;
  296. }
  297. static const char *spa_type_name(u16 type)
  298. {
  299. static const char *to_name[] = {
  300. [NFIT_SPA_VOLATILE] = "volatile",
  301. [NFIT_SPA_PM] = "pmem",
  302. [NFIT_SPA_DCR] = "dimm-control-region",
  303. [NFIT_SPA_BDW] = "block-data-window",
  304. [NFIT_SPA_VDISK] = "volatile-disk",
  305. [NFIT_SPA_VCD] = "volatile-cd",
  306. [NFIT_SPA_PDISK] = "persistent-disk",
  307. [NFIT_SPA_PCD] = "persistent-cd",
  308. };
  309. if (type > NFIT_SPA_PCD)
  310. return "unknown";
  311. return to_name[type];
  312. }
  313. static int nfit_spa_type(struct acpi_nfit_system_address *spa)
  314. {
  315. int i;
  316. for (i = 0; i < NFIT_UUID_MAX; i++)
  317. if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0)
  318. return i;
  319. return -1;
  320. }
  321. static bool add_spa(struct acpi_nfit_desc *acpi_desc,
  322. struct nfit_table_prev *prev,
  323. struct acpi_nfit_system_address *spa)
  324. {
  325. size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
  326. struct device *dev = acpi_desc->dev;
  327. struct nfit_spa *nfit_spa;
  328. list_for_each_entry(nfit_spa, &prev->spas, list) {
  329. if (memcmp(nfit_spa->spa, spa, length) == 0) {
  330. list_move_tail(&nfit_spa->list, &acpi_desc->spas);
  331. return true;
  332. }
  333. }
  334. nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa), GFP_KERNEL);
  335. if (!nfit_spa)
  336. return false;
  337. INIT_LIST_HEAD(&nfit_spa->list);
  338. nfit_spa->spa = spa;
  339. list_add_tail(&nfit_spa->list, &acpi_desc->spas);
  340. dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
  341. spa->range_index,
  342. spa_type_name(nfit_spa_type(spa)));
  343. return true;
  344. }
  345. static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
  346. struct nfit_table_prev *prev,
  347. struct acpi_nfit_memory_map *memdev)
  348. {
  349. size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
  350. struct device *dev = acpi_desc->dev;
  351. struct nfit_memdev *nfit_memdev;
  352. list_for_each_entry(nfit_memdev, &prev->memdevs, list)
  353. if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
  354. list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
  355. return true;
  356. }
  357. nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev), GFP_KERNEL);
  358. if (!nfit_memdev)
  359. return false;
  360. INIT_LIST_HEAD(&nfit_memdev->list);
  361. nfit_memdev->memdev = memdev;
  362. list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
  363. dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n",
  364. __func__, memdev->device_handle, memdev->range_index,
  365. memdev->region_index);
  366. return true;
  367. }
  368. static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
  369. struct nfit_table_prev *prev,
  370. struct acpi_nfit_control_region *dcr)
  371. {
  372. size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
  373. struct device *dev = acpi_desc->dev;
  374. struct nfit_dcr *nfit_dcr;
  375. list_for_each_entry(nfit_dcr, &prev->dcrs, list)
  376. if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
  377. list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
  378. return true;
  379. }
  380. nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr), GFP_KERNEL);
  381. if (!nfit_dcr)
  382. return false;
  383. INIT_LIST_HEAD(&nfit_dcr->list);
  384. nfit_dcr->dcr = dcr;
  385. list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
  386. dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
  387. dcr->region_index, dcr->windows);
  388. return true;
  389. }
  390. static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
  391. struct nfit_table_prev *prev,
  392. struct acpi_nfit_data_region *bdw)
  393. {
  394. size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
  395. struct device *dev = acpi_desc->dev;
  396. struct nfit_bdw *nfit_bdw;
  397. list_for_each_entry(nfit_bdw, &prev->bdws, list)
  398. if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
  399. list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
  400. return true;
  401. }
  402. nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw), GFP_KERNEL);
  403. if (!nfit_bdw)
  404. return false;
  405. INIT_LIST_HEAD(&nfit_bdw->list);
  406. nfit_bdw->bdw = bdw;
  407. list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
  408. dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
  409. bdw->region_index, bdw->windows);
  410. return true;
  411. }
  412. static bool add_idt(struct acpi_nfit_desc *acpi_desc,
  413. struct nfit_table_prev *prev,
  414. struct acpi_nfit_interleave *idt)
  415. {
  416. size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
  417. struct device *dev = acpi_desc->dev;
  418. struct nfit_idt *nfit_idt;
  419. list_for_each_entry(nfit_idt, &prev->idts, list)
  420. if (memcmp(nfit_idt->idt, idt, length) == 0) {
  421. list_move_tail(&nfit_idt->list, &acpi_desc->idts);
  422. return true;
  423. }
  424. nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt), GFP_KERNEL);
  425. if (!nfit_idt)
  426. return false;
  427. INIT_LIST_HEAD(&nfit_idt->list);
  428. nfit_idt->idt = idt;
  429. list_add_tail(&nfit_idt->list, &acpi_desc->idts);
  430. dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
  431. idt->interleave_index, idt->line_count);
  432. return true;
  433. }
  434. static bool add_flush(struct acpi_nfit_desc *acpi_desc,
  435. struct nfit_table_prev *prev,
  436. struct acpi_nfit_flush_address *flush)
  437. {
  438. size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
  439. struct device *dev = acpi_desc->dev;
  440. struct nfit_flush *nfit_flush;
  441. list_for_each_entry(nfit_flush, &prev->flushes, list)
  442. if (memcmp(nfit_flush->flush, flush, length) == 0) {
  443. list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
  444. return true;
  445. }
  446. nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush), GFP_KERNEL);
  447. if (!nfit_flush)
  448. return false;
  449. INIT_LIST_HEAD(&nfit_flush->list);
  450. nfit_flush->flush = flush;
  451. list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
  452. dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
  453. flush->device_handle, flush->hint_count);
  454. return true;
  455. }
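/* Parse a single NFIT sub-table, reusing the entry from the previous
 * enumeration when it is unchanged. Returns a pointer to the next table,
 * NULL at the end (or on a zero-length table), or ERR_PTR(-ENOMEM). */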
  456. static void *add_table(struct acpi_nfit_desc *acpi_desc,
  457. struct nfit_table_prev *prev, void *table, const void *end)
  458. {
  459. struct device *dev = acpi_desc->dev;
  460. struct acpi_nfit_header *hdr;
  461. void *err = ERR_PTR(-ENOMEM);
  462. if (table >= end)
  463. return NULL;
  464. hdr = table;
  465. if (!hdr->length) {
  466. dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
  467. hdr->type);
  468. return NULL;
  469. }
  470. switch (hdr->type) {
  471. case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
  472. if (!add_spa(acpi_desc, prev, table))
  473. return err;
  474. break;
  475. case ACPI_NFIT_TYPE_MEMORY_MAP:
  476. if (!add_memdev(acpi_desc, prev, table))
  477. return err;
  478. break;
  479. case ACPI_NFIT_TYPE_CONTROL_REGION:
  480. if (!add_dcr(acpi_desc, prev, table))
  481. return err;
  482. break;
  483. case ACPI_NFIT_TYPE_DATA_REGION:
  484. if (!add_bdw(acpi_desc, prev, table))
  485. return err;
  486. break;
  487. case ACPI_NFIT_TYPE_INTERLEAVE:
  488. if (!add_idt(acpi_desc, prev, table))
  489. return err;
  490. break;
  491. case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
  492. if (!add_flush(acpi_desc, prev, table))
  493. return err;
  494. break;
  495. case ACPI_NFIT_TYPE_SMBIOS:
  496. dev_dbg(dev, "%s: smbios\n", __func__);
  497. break;
  498. default:
  499. dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
  500. break;
  501. }
  502. return table + hdr->length;
  503. }
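/* Find the SPA-BDW range whose memdev matches this DIMM's handle and control
 * region, i.e. the aperture backing its block data window. */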
  504. static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
  505. struct nfit_mem *nfit_mem)
  506. {
  507. u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
  508. u16 dcr = nfit_mem->dcr->region_index;
  509. struct nfit_spa *nfit_spa;
  510. list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
  511. u16 range_index = nfit_spa->spa->range_index;
  512. int type = nfit_spa_type(nfit_spa->spa);
  513. struct nfit_memdev *nfit_memdev;
  514. if (type != NFIT_SPA_BDW)
  515. continue;
  516. list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
  517. if (nfit_memdev->memdev->range_index != range_index)
  518. continue;
  519. if (nfit_memdev->memdev->device_handle != device_handle)
  520. continue;
  521. if (nfit_memdev->memdev->region_index != dcr)
  522. continue;
  523. nfit_mem->spa_bdw = nfit_spa->spa;
  524. return;
  525. }
  526. }
  527. dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
  528. nfit_mem->spa_dcr->range_index);
  529. nfit_mem->bdw = NULL;
  530. }
  531. static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
  532. struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
  533. {
  534. u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
  535. struct nfit_memdev *nfit_memdev;
  536. struct nfit_flush *nfit_flush;
  537. struct nfit_bdw *nfit_bdw;
  538. struct nfit_idt *nfit_idt;
  539. u16 idt_idx, range_index;
  540. list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
  541. if (nfit_bdw->bdw->region_index != dcr)
  542. continue;
  543. nfit_mem->bdw = nfit_bdw->bdw;
  544. break;
  545. }
  546. if (!nfit_mem->bdw)
  547. return;
  548. nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
  549. if (!nfit_mem->spa_bdw)
  550. return;
  551. range_index = nfit_mem->spa_bdw->range_index;
  552. list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
  553. if (nfit_memdev->memdev->range_index != range_index ||
  554. nfit_memdev->memdev->region_index != dcr)
  555. continue;
  556. nfit_mem->memdev_bdw = nfit_memdev->memdev;
  557. idt_idx = nfit_memdev->memdev->interleave_index;
  558. list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
  559. if (nfit_idt->idt->interleave_index != idt_idx)
  560. continue;
  561. nfit_mem->idt_bdw = nfit_idt->idt;
  562. break;
  563. }
  564. list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
  565. if (nfit_flush->flush->device_handle !=
  566. nfit_memdev->memdev->device_handle)
  567. continue;
  568. nfit_mem->nfit_flush = nfit_flush;
  569. break;
  570. }
  571. break;
  572. }
  573. }
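/* For every memdev that references this SPA range, find or allocate the
 * nfit_mem for the corresponding DIMM and record its control region,
 * interleave table, and (for SPA-DCR ranges) block-window resources. */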
  574. static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
  575. struct acpi_nfit_system_address *spa)
  576. {
  577. struct nfit_mem *nfit_mem, *found;
  578. struct nfit_memdev *nfit_memdev;
  579. int type = nfit_spa_type(spa);
  580. switch (type) {
  581. case NFIT_SPA_DCR:
  582. case NFIT_SPA_PM:
  583. break;
  584. default:
  585. return 0;
  586. }
  587. list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
  588. struct nfit_dcr *nfit_dcr;
  589. u32 device_handle;
  590. u16 dcr;
  591. if (nfit_memdev->memdev->range_index != spa->range_index)
  592. continue;
  593. found = NULL;
  594. dcr = nfit_memdev->memdev->region_index;
  595. device_handle = nfit_memdev->memdev->device_handle;
  596. list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
  597. if (__to_nfit_memdev(nfit_mem)->device_handle
  598. == device_handle) {
  599. found = nfit_mem;
  600. break;
  601. }
  602. if (found)
  603. nfit_mem = found;
  604. else {
  605. nfit_mem = devm_kzalloc(acpi_desc->dev,
  606. sizeof(*nfit_mem), GFP_KERNEL);
  607. if (!nfit_mem)
  608. return -ENOMEM;
  609. INIT_LIST_HEAD(&nfit_mem->list);
  610. nfit_mem->acpi_desc = acpi_desc;
  611. list_add(&nfit_mem->list, &acpi_desc->dimms);
  612. }
  613. list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
  614. if (nfit_dcr->dcr->region_index != dcr)
  615. continue;
  616. /*
  617. * Record the control region for the dimm. For
  618. * the ACPI 6.1 case, where there are separate
  619. * control regions for the pmem vs blk
  620. * interfaces, be sure to record the extended
  621. * blk details.
  622. */
  623. if (!nfit_mem->dcr)
  624. nfit_mem->dcr = nfit_dcr->dcr;
  625. else if (nfit_mem->dcr->windows == 0
  626. && nfit_dcr->dcr->windows)
  627. nfit_mem->dcr = nfit_dcr->dcr;
  628. break;
  629. }
  630. if (dcr && !nfit_mem->dcr) {
  631. dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
  632. spa->range_index, dcr);
  633. return -ENODEV;
  634. }
  635. if (type == NFIT_SPA_DCR) {
  636. struct nfit_idt *nfit_idt;
  637. u16 idt_idx;
  638. /* multiple dimms may share a SPA when interleaved */
  639. nfit_mem->spa_dcr = spa;
  640. nfit_mem->memdev_dcr = nfit_memdev->memdev;
  641. idt_idx = nfit_memdev->memdev->interleave_index;
  642. list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
  643. if (nfit_idt->idt->interleave_index != idt_idx)
  644. continue;
  645. nfit_mem->idt_dcr = nfit_idt->idt;
  646. break;
  647. }
  648. nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
  649. } else {
  650. /*
  651. * A single dimm may belong to multiple SPA-PM
  652. * ranges, record at least one in addition to
  653. * any SPA-DCR range.
  654. */
  655. nfit_mem->memdev_pmem = nfit_memdev->memdev;
  656. }
  657. }
  658. return 0;
  659. }
  660. static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
  661. {
  662. struct nfit_mem *a = container_of(_a, typeof(*a), list);
  663. struct nfit_mem *b = container_of(_b, typeof(*b), list);
  664. u32 handleA, handleB;
  665. handleA = __to_nfit_memdev(a)->device_handle;
  666. handleB = __to_nfit_memdev(b)->device_handle;
  667. if (handleA < handleB)
  668. return -1;
  669. else if (handleA > handleB)
  670. return 1;
  671. return 0;
  672. }
  673. static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
  674. {
  675. struct nfit_spa *nfit_spa;
  676. /*
  677. * For each SPA-DCR or SPA-PMEM address range find its
  678. * corresponding MEMDEV(s). From each MEMDEV find the
  679. * corresponding DCR. Then, if we're operating on a SPA-DCR,
  680. * try to find a SPA-BDW and a corresponding BDW that references
  681. * the DCR. Throw it all into an nfit_mem object. Note, that
  682. * BDWs are optional.
  683. */
  684. list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
  685. int rc;
  686. rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa);
  687. if (rc)
  688. return rc;
  689. }
  690. list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
  691. return 0;
  692. }
  693. static ssize_t revision_show(struct device *dev,
  694. struct device_attribute *attr, char *buf)
  695. {
  696. struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
  697. struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
  698. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  699. return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
  700. }
  701. static DEVICE_ATTR_RO(revision);
  702. static struct attribute *acpi_nfit_attributes[] = {
  703. &dev_attr_revision.attr,
  704. NULL,
  705. };
  706. static struct attribute_group acpi_nfit_attribute_group = {
  707. .name = "nfit",
  708. .attrs = acpi_nfit_attributes,
  709. };
  710. static const struct attribute_group *acpi_nfit_attribute_groups[] = {
  711. &nvdimm_bus_attribute_group,
  712. &acpi_nfit_attribute_group,
  713. NULL,
  714. };
  715. static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
  716. {
  717. struct nvdimm *nvdimm = to_nvdimm(dev);
  718. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  719. return __to_nfit_memdev(nfit_mem);
  720. }
  721. static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
  722. {
  723. struct nvdimm *nvdimm = to_nvdimm(dev);
  724. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  725. return nfit_mem->dcr;
  726. }
  727. static ssize_t handle_show(struct device *dev,
  728. struct device_attribute *attr, char *buf)
  729. {
  730. struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
  731. return sprintf(buf, "%#x\n", memdev->device_handle);
  732. }
  733. static DEVICE_ATTR_RO(handle);
  734. static ssize_t phys_id_show(struct device *dev,
  735. struct device_attribute *attr, char *buf)
  736. {
  737. struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
  738. return sprintf(buf, "%#x\n", memdev->physical_id);
  739. }
  740. static DEVICE_ATTR_RO(phys_id);
  741. static ssize_t vendor_show(struct device *dev,
  742. struct device_attribute *attr, char *buf)
  743. {
  744. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  745. return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
  746. }
  747. static DEVICE_ATTR_RO(vendor);
  748. static ssize_t rev_id_show(struct device *dev,
  749. struct device_attribute *attr, char *buf)
  750. {
  751. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  752. return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
  753. }
  754. static DEVICE_ATTR_RO(rev_id);
  755. static ssize_t device_show(struct device *dev,
  756. struct device_attribute *attr, char *buf)
  757. {
  758. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  759. return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
  760. }
  761. static DEVICE_ATTR_RO(device);
  762. static ssize_t subsystem_vendor_show(struct device *dev,
  763. struct device_attribute *attr, char *buf)
  764. {
  765. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  766. return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
  767. }
  768. static DEVICE_ATTR_RO(subsystem_vendor);
  769. static ssize_t subsystem_rev_id_show(struct device *dev,
  770. struct device_attribute *attr, char *buf)
  771. {
  772. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  773. return sprintf(buf, "0x%04x\n",
  774. be16_to_cpu(dcr->subsystem_revision_id));
  775. }
  776. static DEVICE_ATTR_RO(subsystem_rev_id);
  777. static ssize_t subsystem_device_show(struct device *dev,
  778. struct device_attribute *attr, char *buf)
  779. {
  780. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  781. return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
  782. }
  783. static DEVICE_ATTR_RO(subsystem_device);
  784. static int num_nvdimm_formats(struct nvdimm *nvdimm)
  785. {
  786. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  787. int formats = 0;
  788. if (nfit_mem->memdev_pmem)
  789. formats++;
  790. if (nfit_mem->memdev_bdw)
  791. formats++;
  792. return formats;
  793. }
  794. static ssize_t format_show(struct device *dev,
  795. struct device_attribute *attr, char *buf)
  796. {
  797. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  798. return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
  799. }
  800. static DEVICE_ATTR_RO(format);
  801. static ssize_t format1_show(struct device *dev,
  802. struct device_attribute *attr, char *buf)
  803. {
  804. u32 handle;
  805. ssize_t rc = -ENXIO;
  806. struct nfit_mem *nfit_mem;
  807. struct nfit_memdev *nfit_memdev;
  808. struct acpi_nfit_desc *acpi_desc;
  809. struct nvdimm *nvdimm = to_nvdimm(dev);
  810. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  811. nfit_mem = nvdimm_provider_data(nvdimm);
  812. acpi_desc = nfit_mem->acpi_desc;
  813. handle = to_nfit_memdev(dev)->device_handle;
  814. /* assumes DIMMs have at most 2 published interface codes */
  815. mutex_lock(&acpi_desc->init_mutex);
  816. list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
  817. struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
  818. struct nfit_dcr *nfit_dcr;
  819. if (memdev->device_handle != handle)
  820. continue;
  821. list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
  822. if (nfit_dcr->dcr->region_index != memdev->region_index)
  823. continue;
  824. if (nfit_dcr->dcr->code == dcr->code)
  825. continue;
  826. rc = sprintf(buf, "0x%04x\n",
  827. le16_to_cpu(nfit_dcr->dcr->code));
  828. break;
  829. }
  830. if (rc != -ENXIO)
  831. break;
  832. }
  833. mutex_unlock(&acpi_desc->init_mutex);
  834. return rc;
  835. }
  836. static DEVICE_ATTR_RO(format1);
  837. static ssize_t formats_show(struct device *dev,
  838. struct device_attribute *attr, char *buf)
  839. {
  840. struct nvdimm *nvdimm = to_nvdimm(dev);
  841. return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
  842. }
  843. static DEVICE_ATTR_RO(formats);
  844. static ssize_t serial_show(struct device *dev,
  845. struct device_attribute *attr, char *buf)
  846. {
  847. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  848. return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
  849. }
  850. static DEVICE_ATTR_RO(serial);
  851. static ssize_t family_show(struct device *dev,
  852. struct device_attribute *attr, char *buf)
  853. {
  854. struct nvdimm *nvdimm = to_nvdimm(dev);
  855. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  856. if (nfit_mem->family < 0)
  857. return -ENXIO;
  858. return sprintf(buf, "%d\n", nfit_mem->family);
  859. }
  860. static DEVICE_ATTR_RO(family);
  861. static ssize_t dsm_mask_show(struct device *dev,
  862. struct device_attribute *attr, char *buf)
  863. {
  864. struct nvdimm *nvdimm = to_nvdimm(dev);
  865. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  866. if (nfit_mem->family < 0)
  867. return -ENXIO;
  868. return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
  869. }
  870. static DEVICE_ATTR_RO(dsm_mask);
  871. static ssize_t flags_show(struct device *dev,
  872. struct device_attribute *attr, char *buf)
  873. {
  874. u16 flags = to_nfit_memdev(dev)->flags;
  875. return sprintf(buf, "%s%s%s%s%s\n",
  876. flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
  877. flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
  878. flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
  879. flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
  880. flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
  881. }
  882. static DEVICE_ATTR_RO(flags);
  883. static ssize_t id_show(struct device *dev,
  884. struct device_attribute *attr, char *buf)
  885. {
  886. struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
  887. if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
  888. return sprintf(buf, "%04x-%02x-%04x-%08x\n",
  889. be16_to_cpu(dcr->vendor_id),
  890. dcr->manufacturing_location,
  891. be16_to_cpu(dcr->manufacturing_date),
  892. be32_to_cpu(dcr->serial_number));
  893. else
  894. return sprintf(buf, "%04x-%08x\n",
  895. be16_to_cpu(dcr->vendor_id),
  896. be32_to_cpu(dcr->serial_number));
  897. }
  898. static DEVICE_ATTR_RO(id);
  899. static struct attribute *acpi_nfit_dimm_attributes[] = {
  900. &dev_attr_handle.attr,
  901. &dev_attr_phys_id.attr,
  902. &dev_attr_vendor.attr,
  903. &dev_attr_device.attr,
  904. &dev_attr_rev_id.attr,
  905. &dev_attr_subsystem_vendor.attr,
  906. &dev_attr_subsystem_device.attr,
  907. &dev_attr_subsystem_rev_id.attr,
  908. &dev_attr_format.attr,
  909. &dev_attr_formats.attr,
  910. &dev_attr_format1.attr,
  911. &dev_attr_serial.attr,
  912. &dev_attr_flags.attr,
  913. &dev_attr_id.attr,
  914. &dev_attr_family.attr,
  915. &dev_attr_dsm_mask.attr,
  916. NULL,
  917. };
  918. static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
  919. struct attribute *a, int n)
  920. {
  921. struct device *dev = container_of(kobj, struct device, kobj);
  922. struct nvdimm *nvdimm = to_nvdimm(dev);
  923. if (!to_nfit_dcr(dev))
  924. return 0;
  925. if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
  926. return 0;
  927. return a->mode;
  928. }
  929. static struct attribute_group acpi_nfit_dimm_attribute_group = {
  930. .name = "nfit",
  931. .attrs = acpi_nfit_dimm_attributes,
  932. .is_visible = acpi_nfit_dimm_attr_visible,
  933. };
  934. static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
  935. &nvdimm_attribute_group,
  936. &nd_device_attribute_group,
  937. &acpi_nfit_dimm_attribute_group,
  938. NULL,
  939. };
  940. static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
  941. u32 device_handle)
  942. {
  943. struct nfit_mem *nfit_mem;
  944. list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
  945. if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
  946. return nfit_mem->nvdimm;
  947. return NULL;
  948. }
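/* Bind the DIMM to its ACPI companion (looked up by _ADR == device handle)
 * and probe which _DSM family and functions the firmware implements. */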
  949. static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
  950. struct nfit_mem *nfit_mem, u32 device_handle)
  951. {
  952. struct acpi_device *adev, *adev_dimm;
  953. struct device *dev = acpi_desc->dev;
  954. unsigned long dsm_mask;
  955. const u8 *uuid;
  956. int i;
  957. /* nfit test assumes 1:1 relationship between commands and dsms */
  958. nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
  959. nfit_mem->family = NVDIMM_FAMILY_INTEL;
  960. adev = to_acpi_dev(acpi_desc);
  961. if (!adev)
  962. return 0;
  963. adev_dimm = acpi_find_child_device(adev, device_handle, false);
  964. nfit_mem->adev = adev_dimm;
  965. if (!adev_dimm) {
  966. dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
  967. device_handle);
  968. return force_enable_dimms ? 0 : -ENODEV;
  969. }
  970. /*
  971. * Until standardization materializes we need to consider up to 3
  972. * different command sets. Note, that checking for function0 (bit0)
  973. * tells us if any commands are reachable through this uuid.
  974. */
  975. for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
  976. if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
  977. break;
  978. /* limit the supported commands to those that are publicly documented */
  979. nfit_mem->family = i;
  980. if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
  981. dsm_mask = 0x3fe;
  982. if (disable_vendor_specific)
  983. dsm_mask &= ~(1 << ND_CMD_VENDOR);
  984. } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1)
  985. dsm_mask = 0x1c3c76;
  986. else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
  987. dsm_mask = 0x1fe;
  988. if (disable_vendor_specific)
  989. dsm_mask &= ~(1 << 8);
  990. } else {
  991. dev_dbg(dev, "unknown dimm command family\n");
  992. nfit_mem->family = -1;
  993. /* DSMs are optional, continue loading the driver... */
  994. return 0;
  995. }
  996. uuid = to_nfit_uuid(nfit_mem->family);
  997. for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
  998. if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
  999. set_bit(i, &nfit_mem->dsm_mask);
  1000. return 0;
  1001. }
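/* Register an nvdimm device for each nfit_mem, setting the NDD_ALIASING /
 * NDD_UNARMED flags and the user-visible command mask. */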
  1002. static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
  1003. {
  1004. struct nfit_mem *nfit_mem;
  1005. int dimm_count = 0;
  1006. list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
  1007. unsigned long flags = 0, cmd_mask;
  1008. struct nvdimm *nvdimm;
  1009. u32 device_handle;
  1010. u16 mem_flags;
  1011. int rc;
  1012. device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
  1013. nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
  1014. if (nvdimm) {
  1015. dimm_count++;
  1016. continue;
  1017. }
  1018. if (nfit_mem->bdw && nfit_mem->memdev_pmem)
  1019. flags |= NDD_ALIASING;
  1020. mem_flags = __to_nfit_memdev(nfit_mem)->flags;
  1021. if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
  1022. flags |= NDD_UNARMED;
  1023. rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
  1024. if (rc)
  1025. continue;
  1026. /*
  1027. * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
  1028. * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
  1029. * userspace interface.
  1030. */
  1031. cmd_mask = 1UL << ND_CMD_CALL;
  1032. if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
  1033. cmd_mask |= nfit_mem->dsm_mask;
  1034. nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
  1035. acpi_nfit_dimm_attribute_groups,
  1036. flags, cmd_mask);
  1037. if (!nvdimm)
  1038. return -ENOMEM;
  1039. nfit_mem->nvdimm = nvdimm;
  1040. dimm_count++;
  1041. if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
  1042. continue;
  1043. dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
  1044. nvdimm_name(nvdimm),
  1045. mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
  1046. mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
  1047. mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
  1048. mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "");
  1049. }
  1050. return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
  1051. }
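/* Probe the root NFIT device for bus-scope DSMs (ARS capability, start,
 * status, and clear error). */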
  1052. static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
  1053. {
  1054. struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
  1055. const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS);
  1056. struct acpi_device *adev;
  1057. int i;
  1058. nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
  1059. adev = to_acpi_dev(acpi_desc);
  1060. if (!adev)
  1061. return;
  1062. for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
  1063. if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
  1064. set_bit(i, &nd_desc->cmd_mask);
  1065. }
  1066. static ssize_t range_index_show(struct device *dev,
  1067. struct device_attribute *attr, char *buf)
  1068. {
  1069. struct nd_region *nd_region = to_nd_region(dev);
  1070. struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
  1071. return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
  1072. }
  1073. static DEVICE_ATTR_RO(range_index);
  1074. static struct attribute *acpi_nfit_region_attributes[] = {
  1075. &dev_attr_range_index.attr,
  1076. NULL,
  1077. };
  1078. static struct attribute_group acpi_nfit_region_attribute_group = {
  1079. .name = "nfit",
  1080. .attrs = acpi_nfit_region_attributes,
  1081. };
  1082. static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
  1083. &nd_region_attribute_group,
  1084. &nd_mapping_attribute_group,
  1085. &nd_device_attribute_group,
  1086. &nd_numa_attribute_group,
  1087. &acpi_nfit_region_attribute_group,
  1088. NULL,
  1089. };
  1090. /* enough info to uniquely specify an interleave set */
  1091. struct nfit_set_info {
  1092. struct nfit_set_info_map {
  1093. u64 region_offset;
  1094. u32 serial_number;
  1095. u32 pad;
  1096. } mapping[0];
  1097. };
  1098. static size_t sizeof_nfit_set_info(int num_mappings)
  1099. {
  1100. return sizeof(struct nfit_set_info)
  1101. + num_mappings * sizeof(struct nfit_set_info_map);
  1102. }
  1103. static int cmp_map(const void *m0, const void *m1)
  1104. {
  1105. const struct nfit_set_info_map *map0 = m0;
  1106. const struct nfit_set_info_map *map1 = m1;
  1107. return memcmp(&map0->region_offset, &map1->region_offset,
  1108. sizeof(u64));
  1109. }
  1110. /* Retrieve the nth entry referencing this spa */
  1111. static struct acpi_nfit_memory_map *memdev_from_spa(
  1112. struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
  1113. {
  1114. struct nfit_memdev *nfit_memdev;
  1115. list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
  1116. if (nfit_memdev->memdev->range_index == range_index)
  1117. if (n-- == 0)
  1118. return nfit_memdev->memdev;
  1119. return NULL;
  1120. }
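/* Build the interleave-set cookie for a PMEM/volatile range: a fletcher64
 * over the per-mapping region offset and DIMM serial number, sorted by
 * region offset. */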
  1121. static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
  1122. struct nd_region_desc *ndr_desc,
  1123. struct acpi_nfit_system_address *spa)
  1124. {
  1125. int i, spa_type = nfit_spa_type(spa);
  1126. struct device *dev = acpi_desc->dev;
  1127. struct nd_interleave_set *nd_set;
  1128. u16 nr = ndr_desc->num_mappings;
  1129. struct nfit_set_info *info;
  1130. if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE)
  1131. /* pass */;
  1132. else
  1133. return 0;
  1134. nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
  1135. if (!nd_set)
  1136. return -ENOMEM;
  1137. info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
  1138. if (!info)
  1139. return -ENOMEM;
  1140. for (i = 0; i < nr; i++) {
  1141. struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
  1142. struct nfit_set_info_map *map = &info->mapping[i];
  1143. struct nvdimm *nvdimm = nd_mapping->nvdimm;
  1144. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  1145. struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
  1146. spa->range_index, i);
  1147. if (!memdev || !nfit_mem->dcr) {
  1148. dev_err(dev, "%s: failed to find DCR\n", __func__);
  1149. return -ENODEV;
  1150. }
  1151. map->region_offset = memdev->region_offset;
  1152. map->serial_number = nfit_mem->dcr->serial_number;
  1153. }
  1154. sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
  1155. cmp_map, NULL);
  1156. nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
  1157. ndr_desc->nd_set = nd_set;
  1158. devm_kfree(dev, info);
  1159. return 0;
  1160. }
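/* Translate a linear offset within a block/control window into the physical
 * offset dictated by the interleave description table. */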
  1161. static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
  1162. {
  1163. struct acpi_nfit_interleave *idt = mmio->idt;
  1164. u32 sub_line_offset, line_index, line_offset;
  1165. u64 line_no, table_skip_count, table_offset;
  1166. line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
  1167. table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
  1168. line_offset = idt->line_offset[line_index]
  1169. * mmio->line_size;
  1170. table_offset = table_skip_count * mmio->table_size;
  1171. return mmio->base_offset + line_offset + table_offset + sub_line_offset;
  1172. }
  1173. static void wmb_blk(struct nfit_blk *nfit_blk)
  1174. {
  1175. if (nfit_blk->nvdimm_flush) {
  1176. /*
  1177. * The first wmb() is needed to 'sfence' all previous writes
  1178. * such that they are architecturally visible for the platform
  1179. * buffer flush. Note that we've already arranged for pmem
  1180. * writes to avoid the cache via arch_memcpy_to_pmem(). The
  1181. * final wmb() ensures ordering for the NVDIMM flush write.
  1182. */
  1183. wmb();
  1184. writeq(1, nfit_blk->nvdimm_flush);
  1185. wmb();
  1186. } else
  1187. wmb_pmem();
  1188. }
  1189. static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
  1190. {
  1191. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
  1192. u64 offset = nfit_blk->stat_offset + mmio->size * bw;
  1193. if (mmio->num_lines)
  1194. offset = to_interleave_offset(offset, mmio);
  1195. return readl(mmio->addr.base + offset);
  1196. }
  1197. static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
  1198. resource_size_t dpa, unsigned int len, unsigned int write)
  1199. {
  1200. u64 cmd, offset;
  1201. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
  1202. enum {
  1203. BCW_OFFSET_MASK = (1ULL << 48)-1,
  1204. BCW_LEN_SHIFT = 48,
  1205. BCW_LEN_MASK = (1ULL << 8) - 1,
  1206. BCW_CMD_SHIFT = 56,
  1207. };
  1208. cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
  1209. len = len >> L1_CACHE_SHIFT;
  1210. cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
  1211. cmd |= ((u64) write) << BCW_CMD_SHIFT;
  1212. offset = nfit_blk->cmd_offset + mmio->size * bw;
  1213. if (mmio->num_lines)
  1214. offset = to_interleave_offset(offset, mmio);
  1215. writeq(cmd, mmio->addr.base + offset);
  1216. wmb_blk(nfit_blk);
  1217. if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
  1218. readq(mmio->addr.base + offset);
  1219. }
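/* One block-window transfer on a single lane: program the control register,
 * then copy through the aperture, a line at a time when interleaved. */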
  1220. static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
  1221. resource_size_t dpa, void *iobuf, size_t len, int rw,
  1222. unsigned int lane)
  1223. {
  1224. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
  1225. unsigned int copied = 0;
  1226. u64 base_offset;
  1227. int rc;
  1228. base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
  1229. + lane * mmio->size;
  1230. write_blk_ctl(nfit_blk, lane, dpa, len, rw);
  1231. while (len) {
  1232. unsigned int c;
  1233. u64 offset;
  1234. if (mmio->num_lines) {
  1235. u32 line_offset;
  1236. offset = to_interleave_offset(base_offset + copied,
  1237. mmio);
  1238. div_u64_rem(offset, mmio->line_size, &line_offset);
  1239. c = min_t(size_t, len, mmio->line_size - line_offset);
  1240. } else {
  1241. offset = base_offset + nfit_blk->bdw_offset;
  1242. c = len;
  1243. }
  1244. if (rw)
  1245. memcpy_to_pmem(mmio->addr.aperture + offset,
  1246. iobuf + copied, c);
  1247. else {
  1248. if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
  1249. mmio_flush_range((void __force *)
  1250. mmio->addr.aperture + offset, c);
  1251. memcpy_from_pmem(iobuf + copied,
  1252. mmio->addr.aperture + offset, c);
  1253. }
  1254. copied += c;
  1255. len -= c;
  1256. }
  1257. if (rw)
  1258. wmb_blk(nfit_blk);
  1259. rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
  1260. return rc;
  1261. }
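/*
 * Worked example (illustrative): with the values assumed after
 * to_interleave_offset() (line_size = 256) and a translated offset whose
 * line_offset is 188, the loop above copies min(len, 256 - 188) = 68 bytes,
 * then re-translates and continues from the start of the next interleave
 * line until len is exhausted.
 */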
  1262. static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
  1263. resource_size_t dpa, void *iobuf, u64 len, int rw)
  1264. {
  1265. struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
  1266. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
  1267. struct nd_region *nd_region = nfit_blk->nd_region;
  1268. unsigned int lane, copied = 0;
  1269. int rc = 0;
  1270. lane = nd_region_acquire_lane(nd_region);
  1271. while (len) {
  1272. u64 c = min(len, mmio->size);
  1273. rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
  1274. iobuf + copied, c, rw, lane);
  1275. if (rc)
  1276. break;
  1277. copied += c;
  1278. len -= c;
  1279. }
  1280. nd_region_release_lane(nd_region, lane);
  1281. return rc;
  1282. }
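/*
 * Illustrative numbers (assumed): mmio->size is the per-lane aperture size,
 * so a hypothetical 20 KiB request against an 8 KiB aperture is issued as
 * three acpi_nfit_blk_single_io() calls of 8 KiB, 8 KiB and 4 KiB, all on
 * the single lane held across the loop above.
 */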
  1283. static void nfit_spa_mapping_release(struct kref *kref)
  1284. {
  1285. struct nfit_spa_mapping *spa_map = to_spa_map(kref);
  1286. struct acpi_nfit_system_address *spa = spa_map->spa;
  1287. struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;
  1288. WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
  1289. dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
  1290. if (spa_map->type == SPA_MAP_APERTURE)
  1291. memunmap((void __force *)spa_map->addr.aperture);
  1292. else
  1293. iounmap(spa_map->addr.base);
  1294. release_mem_region(spa->address, spa->length);
  1295. list_del(&spa_map->list);
  1296. kfree(spa_map);
  1297. }
  1298. static struct nfit_spa_mapping *find_spa_mapping(
  1299. struct acpi_nfit_desc *acpi_desc,
  1300. struct acpi_nfit_system_address *spa)
  1301. {
  1302. struct nfit_spa_mapping *spa_map;
  1303. WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
  1304. list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
  1305. if (spa_map->spa == spa)
  1306. return spa_map;
  1307. return NULL;
  1308. }
  1309. static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
  1310. struct acpi_nfit_system_address *spa)
  1311. {
  1312. struct nfit_spa_mapping *spa_map;
  1313. mutex_lock(&acpi_desc->spa_map_mutex);
  1314. spa_map = find_spa_mapping(acpi_desc, spa);
  1315. if (spa_map)
  1316. kref_put(&spa_map->kref, nfit_spa_mapping_release);
  1317. mutex_unlock(&acpi_desc->spa_map_mutex);
  1318. }
  1319. static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  1320. struct acpi_nfit_system_address *spa, enum spa_map_type type)
  1321. {
  1322. resource_size_t start = spa->address;
  1323. resource_size_t n = spa->length;
  1324. struct nfit_spa_mapping *spa_map;
  1325. struct resource *res;
  1326. WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
  1327. spa_map = find_spa_mapping(acpi_desc, spa);
  1328. if (spa_map) {
  1329. kref_get(&spa_map->kref);
  1330. return spa_map->addr.base;
  1331. }
  1332. spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
  1333. if (!spa_map)
  1334. return NULL;
  1335. INIT_LIST_HEAD(&spa_map->list);
  1336. spa_map->spa = spa;
  1337. kref_init(&spa_map->kref);
  1338. spa_map->acpi_desc = acpi_desc;
  1339. res = request_mem_region(start, n, dev_name(acpi_desc->dev));
  1340. if (!res)
  1341. goto err_mem;
  1342. spa_map->type = type;
  1343. if (type == SPA_MAP_APERTURE)
  1344. spa_map->addr.aperture = (void __pmem *)memremap(start, n,
  1345. ARCH_MEMREMAP_PMEM);
  1346. else
  1347. spa_map->addr.base = ioremap_nocache(start, n);
  1348. if (!spa_map->addr.base)
  1349. goto err_map;
  1350. list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
  1351. return spa_map->addr.base;
  1352. err_map:
  1353. release_mem_region(start, n);
  1354. err_mem:
  1355. kfree(spa_map);
  1356. return NULL;
  1357. }
  1358. /**
  1359. * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
1360. * @acpi_desc: nfit descriptor for the bus that provided the spa table entry
1361. * @spa: spa table entry to map
  1362. * @type: aperture or control region
  1363. *
  1364. * In the case where block-data-window apertures and
1365. * dimm-control-regions are interleaved, they will end up sharing a
1366. * single request_mem_region() + ioremap() for the address range. In
1367. * the style of devm, nfit_spa_map() mappings are automatically dropped
  1368. * when all region devices referencing the same mapping are disabled /
  1369. * unbound.
  1370. */
  1371. static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
  1372. struct acpi_nfit_system_address *spa, enum spa_map_type type)
  1373. {
  1374. void __iomem *iomem;
  1375. mutex_lock(&acpi_desc->spa_map_mutex);
  1376. iomem = __nfit_spa_map(acpi_desc, spa, type);
  1377. mutex_unlock(&acpi_desc->spa_map_mutex);
  1378. return iomem;
  1379. }
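/*
 * Sketch of the typical pairing in this file (existing usage, not a new
 * interface): the BLK enable path maps an aperture or control region and
 * the disable path drops the reference.
 *
 *	mmio->addr.base = nfit_spa_map(acpi_desc, spa, SPA_MAP_APERTURE);
 *	...
 *	nfit_spa_unmap(acpi_desc, spa);	// kref_put(); unmapped on last put
 */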
  1380. static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
  1381. struct acpi_nfit_interleave *idt, u16 interleave_ways)
  1382. {
  1383. if (idt) {
  1384. mmio->num_lines = idt->line_count;
  1385. mmio->line_size = idt->line_size;
  1386. if (interleave_ways == 0)
  1387. return -ENXIO;
  1388. mmio->table_size = mmio->num_lines * interleave_ways
  1389. * mmio->line_size;
  1390. }
  1391. return 0;
  1392. }
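/*
 * Worked arithmetic (illustrative, assumed values): for a 2-way interleave
 * with line_count = 2 and line_size = 256, table_size = 2 * 2 * 256 = 1024,
 * matching the table_size used in the to_interleave_offset() example above.
 */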
  1393. static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
  1394. struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
  1395. {
  1396. struct nd_cmd_dimm_flags flags;
  1397. int rc;
  1398. memset(&flags, 0, sizeof(flags));
  1399. rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
  1400. sizeof(flags), NULL);
  1401. if (rc >= 0 && flags.status == 0)
  1402. nfit_blk->dimm_flags = flags.flags;
  1403. else if (rc == -ENOTTY) {
  1404. /* fall back to a conservative default */
  1405. nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
  1406. rc = 0;
  1407. } else
  1408. rc = -ENXIO;
  1409. return rc;
  1410. }
  1411. static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
  1412. struct device *dev)
  1413. {
  1414. struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
  1415. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  1416. struct nd_blk_region *ndbr = to_nd_blk_region(dev);
  1417. struct nfit_flush *nfit_flush;
  1418. struct nfit_blk_mmio *mmio;
  1419. struct nfit_blk *nfit_blk;
  1420. struct nfit_mem *nfit_mem;
  1421. struct nvdimm *nvdimm;
  1422. int rc;
  1423. nvdimm = nd_blk_region_to_dimm(ndbr);
  1424. nfit_mem = nvdimm_provider_data(nvdimm);
  1425. if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
  1426. dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
  1427. nfit_mem ? "" : " nfit_mem",
  1428. (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
  1429. (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
  1430. return -ENXIO;
  1431. }
  1432. nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
  1433. if (!nfit_blk)
  1434. return -ENOMEM;
  1435. nd_blk_region_set_provider_data(ndbr, nfit_blk);
  1436. nfit_blk->nd_region = to_nd_region(dev);
  1437. /* map block aperture memory */
  1438. nfit_blk->bdw_offset = nfit_mem->bdw->offset;
  1439. mmio = &nfit_blk->mmio[BDW];
  1440. mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
  1441. SPA_MAP_APERTURE);
  1442. if (!mmio->addr.base) {
  1443. dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
  1444. nvdimm_name(nvdimm));
  1445. return -ENOMEM;
  1446. }
  1447. mmio->size = nfit_mem->bdw->size;
  1448. mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
  1449. mmio->idt = nfit_mem->idt_bdw;
  1450. mmio->spa = nfit_mem->spa_bdw;
  1451. rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
  1452. nfit_mem->memdev_bdw->interleave_ways);
  1453. if (rc) {
  1454. dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
  1455. __func__, nvdimm_name(nvdimm));
  1456. return rc;
  1457. }
  1458. /* map block control memory */
  1459. nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
  1460. nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
  1461. mmio = &nfit_blk->mmio[DCR];
  1462. mmio->addr.base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
  1463. SPA_MAP_CONTROL);
  1464. if (!mmio->addr.base) {
  1465. dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
  1466. nvdimm_name(nvdimm));
  1467. return -ENOMEM;
  1468. }
  1469. mmio->size = nfit_mem->dcr->window_size;
  1470. mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
  1471. mmio->idt = nfit_mem->idt_dcr;
  1472. mmio->spa = nfit_mem->spa_dcr;
  1473. rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
  1474. nfit_mem->memdev_dcr->interleave_ways);
  1475. if (rc) {
  1476. dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
  1477. __func__, nvdimm_name(nvdimm));
  1478. return rc;
  1479. }
  1480. rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
  1481. if (rc < 0) {
1482. dev_dbg(dev, "%s: %s failed to get DIMM flags\n",
  1483. __func__, nvdimm_name(nvdimm));
  1484. return rc;
  1485. }
  1486. nfit_flush = nfit_mem->nfit_flush;
  1487. if (nfit_flush && nfit_flush->flush->hint_count != 0) {
  1488. nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
  1489. nfit_flush->flush->hint_address[0], 8);
  1490. if (!nfit_blk->nvdimm_flush)
  1491. return -ENOMEM;
  1492. }
  1493. if (!arch_has_wmb_pmem() && !nfit_blk->nvdimm_flush)
  1494. dev_warn(dev, "unable to guarantee persistence of writes\n");
  1495. if (mmio->line_size == 0)
  1496. return 0;
  1497. if ((u32) nfit_blk->cmd_offset % mmio->line_size
  1498. + 8 > mmio->line_size) {
  1499. dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
  1500. return -ENXIO;
  1501. } else if ((u32) nfit_blk->stat_offset % mmio->line_size
  1502. + 8 > mmio->line_size) {
  1503. dev_dbg(dev, "stat_offset crosses interleave boundary\n");
  1504. return -ENXIO;
  1505. }
  1506. return 0;
  1507. }
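/*
 * Worked example (illustrative): the checks above reject layouts where the
 * 8-byte command or status register would straddle an interleave line. With
 * an assumed line_size of 256, a cmd_offset of 252 within its line fails
 * because 252 + 8 > 256, while 248 is the largest offset that still fits.
 */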
  1508. static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
  1509. struct device *dev)
  1510. {
  1511. struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
  1512. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  1513. struct nd_blk_region *ndbr = to_nd_blk_region(dev);
  1514. struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
  1515. int i;
  1516. if (!nfit_blk)
  1517. return; /* never enabled */
  1518. /* auto-free BLK spa mappings */
  1519. for (i = 0; i < 2; i++) {
  1520. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
  1521. if (mmio->addr.base)
  1522. nfit_spa_unmap(acpi_desc, mmio->spa);
  1523. }
  1524. nd_blk_region_set_provider_data(ndbr, NULL);
  1525. /* devm will free nfit_blk */
  1526. }
  1527. static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
  1528. struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
  1529. {
  1530. struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
  1531. struct acpi_nfit_system_address *spa = nfit_spa->spa;
  1532. int cmd_rc, rc;
  1533. cmd->address = spa->address;
  1534. cmd->length = spa->length;
  1535. rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
  1536. sizeof(*cmd), &cmd_rc);
  1537. if (rc < 0)
  1538. return rc;
  1539. return cmd_rc;
  1540. }
  1541. static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
  1542. {
  1543. int rc;
  1544. int cmd_rc;
  1545. struct nd_cmd_ars_start ars_start;
  1546. struct acpi_nfit_system_address *spa = nfit_spa->spa;
  1547. struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
  1548. memset(&ars_start, 0, sizeof(ars_start));
  1549. ars_start.address = spa->address;
  1550. ars_start.length = spa->length;
  1551. if (nfit_spa_type(spa) == NFIT_SPA_PM)
  1552. ars_start.type = ND_ARS_PERSISTENT;
  1553. else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
  1554. ars_start.type = ND_ARS_VOLATILE;
  1555. else
  1556. return -ENOTTY;
  1557. rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
  1558. sizeof(ars_start), &cmd_rc);
  1559. if (rc < 0)
  1560. return rc;
  1561. return cmd_rc;
  1562. }
  1563. static int ars_continue(struct acpi_nfit_desc *acpi_desc)
  1564. {
  1565. int rc, cmd_rc;
  1566. struct nd_cmd_ars_start ars_start;
  1567. struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
  1568. struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
  1569. memset(&ars_start, 0, sizeof(ars_start));
  1570. ars_start.address = ars_status->restart_address;
  1571. ars_start.length = ars_status->restart_length;
  1572. ars_start.type = ars_status->type;
  1573. rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
  1574. sizeof(ars_start), &cmd_rc);
  1575. if (rc < 0)
  1576. return rc;
  1577. return cmd_rc;
  1578. }
  1579. static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
  1580. {
  1581. struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
  1582. struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
  1583. int rc, cmd_rc;
  1584. rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
  1585. acpi_desc->ars_status_size, &cmd_rc);
  1586. if (rc < 0)
  1587. return rc;
  1588. return cmd_rc;
  1589. }
  1590. static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus,
  1591. struct nd_cmd_ars_status *ars_status)
  1592. {
  1593. int rc;
  1594. u32 i;
  1595. for (i = 0; i < ars_status->num_records; i++) {
  1596. rc = nvdimm_bus_add_poison(nvdimm_bus,
  1597. ars_status->records[i].err_address,
  1598. ars_status->records[i].length);
  1599. if (rc)
  1600. return rc;
  1601. }
  1602. return 0;
  1603. }
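/*
 * Sketch of the ARS sequence that the scrub code below drives through the
 * helpers above (a simplified flow, not new helpers):
 *
 *	ars_get_cap(acpi_desc, &cap, nfit_spa);	// max_ars_out, clear_err_unit
 *	ars_start(acpi_desc, nfit_spa);		// ND_ARS_PERSISTENT or _VOLATILE
 *	do {
 *		rc = ars_get_status(acpi_desc);	// -EBUSY while the scrub runs
 *	} while (rc == -EBUSY);
 *	if (rc == -ENOSPC)			// more records than fit
 *		rc = ars_continue(acpi_desc);	// resume at restart_address
 *	ars_status_process_records(acpi_desc->nvdimm_bus, acpi_desc->ars_status);
 */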
  1604. static void acpi_nfit_remove_resource(void *data)
  1605. {
  1606. struct resource *res = data;
  1607. remove_resource(res);
  1608. }
  1609. static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
  1610. struct nd_region_desc *ndr_desc)
  1611. {
  1612. struct resource *res, *nd_res = ndr_desc->res;
  1613. int is_pmem, ret;
  1614. /* No operation if the region is already registered as PMEM */
  1615. is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
  1616. IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
  1617. if (is_pmem == REGION_INTERSECTS)
  1618. return 0;
  1619. res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
  1620. if (!res)
  1621. return -ENOMEM;
  1622. res->name = "Persistent Memory";
  1623. res->start = nd_res->start;
  1624. res->end = nd_res->end;
  1625. res->flags = IORESOURCE_MEM;
  1626. res->desc = IORES_DESC_PERSISTENT_MEMORY;
  1627. ret = insert_resource(&iomem_resource, res);
  1628. if (ret)
  1629. return ret;
  1630. ret = devm_add_action(acpi_desc->dev, acpi_nfit_remove_resource, res);
  1631. if (ret) {
  1632. remove_resource(res);
  1633. return ret;
  1634. }
  1635. return 0;
  1636. }
  1637. static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
  1638. struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc,
  1639. struct acpi_nfit_memory_map *memdev,
  1640. struct nfit_spa *nfit_spa)
  1641. {
  1642. struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
  1643. memdev->device_handle);
  1644. struct acpi_nfit_system_address *spa = nfit_spa->spa;
  1645. struct nd_blk_region_desc *ndbr_desc;
  1646. struct nfit_mem *nfit_mem;
  1647. int blk_valid = 0;
  1648. if (!nvdimm) {
  1649. dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
  1650. spa->range_index, memdev->device_handle);
  1651. return -ENODEV;
  1652. }
  1653. nd_mapping->nvdimm = nvdimm;
  1654. switch (nfit_spa_type(spa)) {
  1655. case NFIT_SPA_PM:
  1656. case NFIT_SPA_VOLATILE:
  1657. nd_mapping->start = memdev->address;
  1658. nd_mapping->size = memdev->region_size;
  1659. break;
  1660. case NFIT_SPA_DCR:
  1661. nfit_mem = nvdimm_provider_data(nvdimm);
  1662. if (!nfit_mem || !nfit_mem->bdw) {
  1663. dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
  1664. spa->range_index, nvdimm_name(nvdimm));
  1665. } else {
  1666. nd_mapping->size = nfit_mem->bdw->capacity;
  1667. nd_mapping->start = nfit_mem->bdw->start_address;
  1668. ndr_desc->num_lanes = nfit_mem->bdw->windows;
  1669. blk_valid = 1;
  1670. }
  1671. ndr_desc->nd_mapping = nd_mapping;
  1672. ndr_desc->num_mappings = blk_valid;
  1673. ndbr_desc = to_blk_region_desc(ndr_desc);
  1674. ndbr_desc->enable = acpi_nfit_blk_region_enable;
  1675. ndbr_desc->disable = acpi_nfit_blk_region_disable;
  1676. ndbr_desc->do_io = acpi_desc->blk_do_io;
  1677. nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
  1678. ndr_desc);
  1679. if (!nfit_spa->nd_region)
  1680. return -ENOMEM;
  1681. break;
  1682. }
  1683. return 0;
  1684. }
  1685. static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
  1686. struct nfit_spa *nfit_spa)
  1687. {
  1688. static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS];
  1689. struct acpi_nfit_system_address *spa = nfit_spa->spa;
  1690. struct nd_blk_region_desc ndbr_desc;
  1691. struct nd_region_desc *ndr_desc;
  1692. struct nfit_memdev *nfit_memdev;
  1693. struct nvdimm_bus *nvdimm_bus;
  1694. struct resource res;
  1695. int count = 0, rc;
  1696. if (nfit_spa->nd_region)
  1697. return 0;
  1698. if (spa->range_index == 0) {
  1699. dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
  1700. __func__);
  1701. return 0;
  1702. }
  1703. memset(&res, 0, sizeof(res));
  1704. memset(&nd_mappings, 0, sizeof(nd_mappings));
  1705. memset(&ndbr_desc, 0, sizeof(ndbr_desc));
  1706. res.start = spa->address;
  1707. res.end = res.start + spa->length - 1;
  1708. ndr_desc = &ndbr_desc.ndr_desc;
  1709. ndr_desc->res = &res;
  1710. ndr_desc->provider_data = nfit_spa;
  1711. ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
  1712. if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
  1713. ndr_desc->numa_node = acpi_map_pxm_to_online_node(
  1714. spa->proximity_domain);
  1715. else
  1716. ndr_desc->numa_node = NUMA_NO_NODE;
  1717. list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
  1718. struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
  1719. struct nd_mapping *nd_mapping;
  1720. if (memdev->range_index != spa->range_index)
  1721. continue;
  1722. if (count >= ND_MAX_MAPPINGS) {
  1723. dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
  1724. spa->range_index, ND_MAX_MAPPINGS);
  1725. return -ENXIO;
  1726. }
  1727. nd_mapping = &nd_mappings[count++];
  1728. rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc,
  1729. memdev, nfit_spa);
  1730. if (rc)
  1731. goto out;
  1732. }
  1733. ndr_desc->nd_mapping = nd_mappings;
  1734. ndr_desc->num_mappings = count;
  1735. rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
  1736. if (rc)
  1737. goto out;
  1738. nvdimm_bus = acpi_desc->nvdimm_bus;
  1739. if (nfit_spa_type(spa) == NFIT_SPA_PM) {
  1740. rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
  1741. if (rc) {
  1742. dev_warn(acpi_desc->dev,
  1743. "failed to insert pmem resource to iomem: %d\n",
  1744. rc);
  1745. goto out;
  1746. }
  1747. nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
  1748. ndr_desc);
  1749. if (!nfit_spa->nd_region)
  1750. rc = -ENOMEM;
  1751. } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) {
  1752. nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
  1753. ndr_desc);
  1754. if (!nfit_spa->nd_region)
  1755. rc = -ENOMEM;
  1756. }
  1757. out:
  1758. if (rc)
  1759. dev_err(acpi_desc->dev, "failed to register spa range %d\n",
  1760. nfit_spa->spa->range_index);
  1761. return rc;
  1762. }
  1763. static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
  1764. u32 max_ars)
  1765. {
  1766. struct device *dev = acpi_desc->dev;
  1767. struct nd_cmd_ars_status *ars_status;
  1768. if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
  1769. memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
  1770. return 0;
  1771. }
  1772. if (acpi_desc->ars_status)
  1773. devm_kfree(dev, acpi_desc->ars_status);
  1774. acpi_desc->ars_status = NULL;
  1775. ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
  1776. if (!ars_status)
  1777. return -ENOMEM;
  1778. acpi_desc->ars_status = ars_status;
  1779. acpi_desc->ars_status_size = max_ars;
  1780. return 0;
  1781. }
  1782. static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
  1783. struct nfit_spa *nfit_spa)
  1784. {
  1785. struct acpi_nfit_system_address *spa = nfit_spa->spa;
  1786. int rc;
  1787. if (!nfit_spa->max_ars) {
  1788. struct nd_cmd_ars_cap ars_cap;
  1789. memset(&ars_cap, 0, sizeof(ars_cap));
  1790. rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
  1791. if (rc < 0)
  1792. return rc;
  1793. nfit_spa->max_ars = ars_cap.max_ars_out;
  1794. nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
  1795. /* check that the supported scrub types match the spa type */
  1796. if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
  1797. ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
  1798. return -ENOTTY;
  1799. else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
  1800. ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
  1801. return -ENOTTY;
  1802. }
  1803. if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
  1804. return -ENOMEM;
  1805. rc = ars_get_status(acpi_desc);
  1806. if (rc < 0 && rc != -ENOSPC)
  1807. return rc;
  1808. if (ars_status_process_records(acpi_desc->nvdimm_bus,
  1809. acpi_desc->ars_status))
  1810. return -ENOMEM;
  1811. return 0;
  1812. }
  1813. static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
  1814. struct nfit_spa *nfit_spa)
  1815. {
  1816. struct acpi_nfit_system_address *spa = nfit_spa->spa;
  1817. unsigned int overflow_retry = scrub_overflow_abort;
  1818. u64 init_ars_start = 0, init_ars_len = 0;
  1819. struct device *dev = acpi_desc->dev;
  1820. unsigned int tmo = scrub_timeout;
  1821. int rc;
  1822. if (nfit_spa->ars_done || !nfit_spa->nd_region)
  1823. return;
  1824. rc = ars_start(acpi_desc, nfit_spa);
  1825. /*
  1826. * If we timed out the initial scan we'll still be busy here,
  1827. * and will wait another timeout before giving up permanently.
  1828. */
  1829. if (rc < 0 && rc != -EBUSY)
  1830. return;
  1831. do {
  1832. u64 ars_start, ars_len;
  1833. if (acpi_desc->cancel)
  1834. break;
  1835. rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
  1836. if (rc == -ENOTTY)
  1837. break;
  1838. if (rc == -EBUSY && !tmo) {
  1839. dev_warn(dev, "range %d ars timeout, aborting\n",
  1840. spa->range_index);
  1841. break;
  1842. }
  1843. if (rc == -EBUSY) {
  1844. /*
  1845. * Note, entries may be appended to the list
  1846. * while the lock is dropped, but the workqueue
  1847. * being active prevents entries being deleted /
  1848. * freed.
  1849. */
  1850. mutex_unlock(&acpi_desc->init_mutex);
  1851. ssleep(1);
  1852. tmo--;
  1853. mutex_lock(&acpi_desc->init_mutex);
  1854. continue;
  1855. }
  1856. /* we got some results, but there are more pending... */
  1857. if (rc == -ENOSPC && overflow_retry--) {
  1858. if (!init_ars_len) {
  1859. init_ars_len = acpi_desc->ars_status->length;
  1860. init_ars_start = acpi_desc->ars_status->address;
  1861. }
  1862. rc = ars_continue(acpi_desc);
  1863. }
  1864. if (rc < 0) {
  1865. dev_warn(dev, "range %d ars continuation failed\n",
  1866. spa->range_index);
  1867. break;
  1868. }
  1869. if (init_ars_len) {
  1870. ars_start = init_ars_start;
  1871. ars_len = init_ars_len;
  1872. } else {
  1873. ars_start = acpi_desc->ars_status->address;
  1874. ars_len = acpi_desc->ars_status->length;
  1875. }
  1876. dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
  1877. spa->range_index, ars_start, ars_len);
  1878. /* notify the region about new poison entries */
  1879. nvdimm_region_notify(nfit_spa->nd_region,
  1880. NVDIMM_REVALIDATE_POISON);
  1881. break;
  1882. } while (1);
  1883. }
  1884. static void acpi_nfit_scrub(struct work_struct *work)
  1885. {
  1886. struct device *dev;
  1887. u64 init_scrub_length = 0;
  1888. struct nfit_spa *nfit_spa;
  1889. u64 init_scrub_address = 0;
  1890. bool init_ars_done = false;
  1891. struct acpi_nfit_desc *acpi_desc;
  1892. unsigned int tmo = scrub_timeout;
  1893. unsigned int overflow_retry = scrub_overflow_abort;
  1894. acpi_desc = container_of(work, typeof(*acpi_desc), work);
  1895. dev = acpi_desc->dev;
  1896. /*
1897. * We scrub in 2 phases. The first phase waits for any platform
1898. * firmware-initiated scrubs to complete and then searches for the
1899. * affected spa regions to mark them scanned. In the second phase we
  1900. * initiate a directed scrub for every range that was not scrubbed in
  1901. * phase 1.
  1902. */
  1903. /* process platform firmware initiated scrubs */
  1904. retry:
  1905. mutex_lock(&acpi_desc->init_mutex);
  1906. list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
  1907. struct nd_cmd_ars_status *ars_status;
  1908. struct acpi_nfit_system_address *spa;
  1909. u64 ars_start, ars_len;
  1910. int rc;
  1911. if (acpi_desc->cancel)
  1912. break;
  1913. if (nfit_spa->nd_region)
  1914. continue;
  1915. if (init_ars_done) {
  1916. /*
  1917. * No need to re-query, we're now just
  1918. * reconciling all the ranges covered by the
  1919. * initial scrub
  1920. */
  1921. rc = 0;
  1922. } else
  1923. rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
  1924. if (rc == -ENOTTY) {
  1925. /* no ars capability, just register spa and move on */
  1926. acpi_nfit_register_region(acpi_desc, nfit_spa);
  1927. continue;
  1928. }
  1929. if (rc == -EBUSY && !tmo) {
  1930. /* fallthrough to directed scrub in phase 2 */
  1931. dev_warn(dev, "timeout awaiting ars results, continuing...\n");
  1932. break;
  1933. } else if (rc == -EBUSY) {
  1934. mutex_unlock(&acpi_desc->init_mutex);
  1935. ssleep(1);
  1936. tmo--;
  1937. goto retry;
  1938. }
  1939. /* we got some results, but there are more pending... */
  1940. if (rc == -ENOSPC && overflow_retry--) {
  1941. ars_status = acpi_desc->ars_status;
  1942. /*
  1943. * Record the original scrub range, so that we
  1944. * can recall all the ranges impacted by the
  1945. * initial scrub.
  1946. */
  1947. if (!init_scrub_length) {
  1948. init_scrub_length = ars_status->length;
  1949. init_scrub_address = ars_status->address;
  1950. }
  1951. rc = ars_continue(acpi_desc);
  1952. if (rc == 0) {
  1953. mutex_unlock(&acpi_desc->init_mutex);
  1954. goto retry;
  1955. }
  1956. }
  1957. if (rc < 0) {
  1958. /*
  1959. * Initial scrub failed, we'll give it one more
  1960. * try below...
  1961. */
  1962. break;
  1963. }
  1964. /* We got some final results, record completed ranges */
  1965. ars_status = acpi_desc->ars_status;
  1966. if (init_scrub_length) {
  1967. ars_start = init_scrub_address;
1968. ars_len = init_scrub_length;
  1969. } else {
  1970. ars_start = ars_status->address;
  1971. ars_len = ars_status->length;
  1972. }
  1973. spa = nfit_spa->spa;
  1974. if (!init_ars_done) {
  1975. init_ars_done = true;
  1976. dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
  1977. ars_start, ars_len);
  1978. }
  1979. if (ars_start <= spa->address && ars_start + ars_len
  1980. >= spa->address + spa->length)
  1981. acpi_nfit_register_region(acpi_desc, nfit_spa);
  1982. }
  1983. /*
  1984. * For all the ranges not covered by an initial scrub we still
  1985. * want to see if there are errors, but it's ok to discover them
  1986. * asynchronously.
  1987. */
  1988. list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
  1989. /*
  1990. * Flag all the ranges that still need scrubbing, but
  1991. * register them now to make data available.
  1992. */
  1993. if (nfit_spa->nd_region)
  1994. nfit_spa->ars_done = 1;
  1995. else
  1996. acpi_nfit_register_region(acpi_desc, nfit_spa);
  1997. }
  1998. list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
  1999. acpi_nfit_async_scrub(acpi_desc, nfit_spa);
  2000. mutex_unlock(&acpi_desc->init_mutex);
  2001. }
  2002. static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
  2003. {
  2004. struct nfit_spa *nfit_spa;
  2005. int rc;
  2006. list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
  2007. if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
  2008. /* BLK regions don't need to wait for ars results */
  2009. rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
  2010. if (rc)
  2011. return rc;
  2012. }
  2013. queue_work(nfit_wq, &acpi_desc->work);
  2014. return 0;
  2015. }
  2016. static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
  2017. struct nfit_table_prev *prev)
  2018. {
  2019. struct device *dev = acpi_desc->dev;
  2020. if (!list_empty(&prev->spas) ||
  2021. !list_empty(&prev->memdevs) ||
  2022. !list_empty(&prev->dcrs) ||
  2023. !list_empty(&prev->bdws) ||
  2024. !list_empty(&prev->idts) ||
  2025. !list_empty(&prev->flushes)) {
  2026. dev_err(dev, "new nfit deletes entries (unsupported)\n");
  2027. return -ENXIO;
  2028. }
  2029. return 0;
  2030. }
  2031. int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)
  2032. {
  2033. struct device *dev = acpi_desc->dev;
  2034. struct nfit_table_prev prev;
  2035. const void *end;
  2036. u8 *data;
  2037. int rc;
  2038. mutex_lock(&acpi_desc->init_mutex);
  2039. INIT_LIST_HEAD(&prev.spas);
  2040. INIT_LIST_HEAD(&prev.memdevs);
  2041. INIT_LIST_HEAD(&prev.dcrs);
  2042. INIT_LIST_HEAD(&prev.bdws);
  2043. INIT_LIST_HEAD(&prev.idts);
  2044. INIT_LIST_HEAD(&prev.flushes);
  2045. list_cut_position(&prev.spas, &acpi_desc->spas,
  2046. acpi_desc->spas.prev);
  2047. list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
  2048. acpi_desc->memdevs.prev);
  2049. list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
  2050. acpi_desc->dcrs.prev);
  2051. list_cut_position(&prev.bdws, &acpi_desc->bdws,
  2052. acpi_desc->bdws.prev);
  2053. list_cut_position(&prev.idts, &acpi_desc->idts,
  2054. acpi_desc->idts.prev);
  2055. list_cut_position(&prev.flushes, &acpi_desc->flushes,
  2056. acpi_desc->flushes.prev);
  2057. data = (u8 *) acpi_desc->nfit;
  2058. end = data + sz;
  2059. while (!IS_ERR_OR_NULL(data))
  2060. data = add_table(acpi_desc, &prev, data, end);
  2061. if (IS_ERR(data)) {
  2062. dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
  2063. PTR_ERR(data));
  2064. rc = PTR_ERR(data);
  2065. goto out_unlock;
  2066. }
  2067. rc = acpi_nfit_check_deletions(acpi_desc, &prev);
  2068. if (rc)
  2069. goto out_unlock;
  2070. if (nfit_mem_init(acpi_desc) != 0) {
  2071. rc = -ENOMEM;
  2072. goto out_unlock;
  2073. }
  2074. acpi_nfit_init_dsms(acpi_desc);
  2075. rc = acpi_nfit_register_dimms(acpi_desc);
  2076. if (rc)
  2077. goto out_unlock;
  2078. rc = acpi_nfit_register_regions(acpi_desc);
  2079. out_unlock:
  2080. mutex_unlock(&acpi_desc->init_mutex);
  2081. return rc;
  2082. }
  2083. EXPORT_SYMBOL_GPL(acpi_nfit_init);
  2084. struct acpi_nfit_flush_work {
  2085. struct work_struct work;
  2086. struct completion cmp;
  2087. };
  2088. static void flush_probe(struct work_struct *work)
  2089. {
  2090. struct acpi_nfit_flush_work *flush;
  2091. flush = container_of(work, typeof(*flush), work);
  2092. complete(&flush->cmp);
  2093. }
  2094. static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
  2095. {
  2096. struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
  2097. struct device *dev = acpi_desc->dev;
  2098. struct acpi_nfit_flush_work flush;
  2099. /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
  2100. device_lock(dev);
  2101. device_unlock(dev);
  2102. /*
2103. * Scrub work could take tens of seconds; userspace may give up, so we
2104. * need to be interruptible while waiting.
  2105. */
  2106. INIT_WORK_ONSTACK(&flush.work, flush_probe);
  2107. COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
  2108. queue_work(nfit_wq, &flush.work);
  2109. return wait_for_completion_interruptible(&flush.cmp);
  2110. }
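/*
 * The on-stack work + completion above relies on nfit_wq being a
 * single-threaded (ordered) workqueue: by the time flush_probe() runs, any
 * scrub work queued earlier has already finished. A hypothetical libnvdimm
 * caller simply invokes the callback and sleeps:
 *
 *	rc = nd_desc->flush_probe(nd_desc);	// interruptible wait
 */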
  2111. static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
  2112. struct nvdimm *nvdimm, unsigned int cmd)
  2113. {
  2114. struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
  2115. if (nvdimm)
  2116. return 0;
  2117. if (cmd != ND_CMD_ARS_START)
  2118. return 0;
  2119. /*
  2120. * The kernel and userspace may race to initiate a scrub, but
  2121. * the scrub thread is prepared to lose that initial race. It
  2122. * just needs guarantees that any ars it initiates are not
2123. * interrupted by any intervening start requests from userspace.
  2124. */
  2125. if (work_busy(&acpi_desc->work))
  2126. return -EBUSY;
  2127. return 0;
  2128. }
  2129. void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
  2130. {
  2131. struct nvdimm_bus_descriptor *nd_desc;
  2132. dev_set_drvdata(dev, acpi_desc);
  2133. acpi_desc->dev = dev;
  2134. acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
  2135. nd_desc = &acpi_desc->nd_desc;
  2136. nd_desc->provider_name = "ACPI.NFIT";
  2137. nd_desc->ndctl = acpi_nfit_ctl;
  2138. nd_desc->flush_probe = acpi_nfit_flush_probe;
  2139. nd_desc->clear_to_send = acpi_nfit_clear_to_send;
  2140. nd_desc->attr_groups = acpi_nfit_attribute_groups;
  2141. INIT_LIST_HEAD(&acpi_desc->spa_maps);
  2142. INIT_LIST_HEAD(&acpi_desc->spas);
  2143. INIT_LIST_HEAD(&acpi_desc->dcrs);
  2144. INIT_LIST_HEAD(&acpi_desc->bdws);
  2145. INIT_LIST_HEAD(&acpi_desc->idts);
  2146. INIT_LIST_HEAD(&acpi_desc->flushes);
  2147. INIT_LIST_HEAD(&acpi_desc->memdevs);
  2148. INIT_LIST_HEAD(&acpi_desc->dimms);
  2149. mutex_init(&acpi_desc->spa_map_mutex);
  2150. mutex_init(&acpi_desc->init_mutex);
  2151. INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
  2152. }
  2153. EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
  2154. static int acpi_nfit_add(struct acpi_device *adev)
  2155. {
  2156. struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
  2157. struct acpi_nfit_desc *acpi_desc;
  2158. struct device *dev = &adev->dev;
  2159. struct acpi_table_header *tbl;
  2160. acpi_status status = AE_OK;
  2161. acpi_size sz;
  2162. int rc;
  2163. status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
  2164. if (ACPI_FAILURE(status)) {
  2165. /* This is ok, we could have an nvdimm hotplugged later */
  2166. dev_dbg(dev, "failed to find NFIT at startup\n");
  2167. return 0;
  2168. }
  2169. acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
  2170. if (!acpi_desc)
  2171. return -ENOMEM;
  2172. acpi_nfit_desc_init(acpi_desc, &adev->dev);
  2173. acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
  2174. if (!acpi_desc->nvdimm_bus)
  2175. return -ENOMEM;
  2176. /*
  2177. * Save the acpi header for later and then skip it,
  2178. * making nfit point to the first nfit table header.
  2179. */
  2180. acpi_desc->acpi_header = *tbl;
  2181. acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
  2182. sz -= sizeof(struct acpi_table_nfit);
  2183. /* Evaluate _FIT and override with that if present */
  2184. status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
  2185. if (ACPI_SUCCESS(status) && buf.length > 0) {
  2186. union acpi_object *obj;
  2187. /*
  2188. * Adjust for the acpi_object header of the _FIT
  2189. */
  2190. obj = buf.pointer;
  2191. if (obj->type == ACPI_TYPE_BUFFER) {
  2192. acpi_desc->nfit =
  2193. (struct acpi_nfit_header *)obj->buffer.pointer;
  2194. sz = obj->buffer.length;
  2195. } else
  2196. dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
  2197. __func__, (int) obj->type);
  2198. }
  2199. rc = acpi_nfit_init(acpi_desc, sz);
  2200. if (rc) {
  2201. nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
  2202. return rc;
  2203. }
  2204. return 0;
  2205. }
  2206. static int acpi_nfit_remove(struct acpi_device *adev)
  2207. {
  2208. struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
  2209. acpi_desc->cancel = 1;
  2210. flush_workqueue(nfit_wq);
  2211. nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
  2212. return 0;
  2213. }
  2214. static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
  2215. {
  2216. struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
  2217. struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
  2218. struct acpi_nfit_header *nfit_saved;
  2219. union acpi_object *obj;
  2220. struct device *dev = &adev->dev;
  2221. acpi_status status;
  2222. int ret;
  2223. dev_dbg(dev, "%s: event: %d\n", __func__, event);
  2224. device_lock(dev);
  2225. if (!dev->driver) {
  2226. /* dev->driver may be null if we're being removed */
  2227. dev_dbg(dev, "%s: no driver found for dev\n", __func__);
  2228. goto out_unlock;
  2229. }
  2230. if (!acpi_desc) {
  2231. acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
  2232. if (!acpi_desc)
  2233. goto out_unlock;
  2234. acpi_nfit_desc_init(acpi_desc, &adev->dev);
  2235. acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc);
  2236. if (!acpi_desc->nvdimm_bus)
  2237. goto out_unlock;
  2238. } else {
  2239. /*
  2240. * Finish previous registration before considering new
  2241. * regions.
  2242. */
  2243. flush_workqueue(nfit_wq);
  2244. }
  2245. /* Evaluate _FIT */
  2246. status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
  2247. if (ACPI_FAILURE(status)) {
  2248. dev_err(dev, "failed to evaluate _FIT\n");
  2249. goto out_unlock;
  2250. }
  2251. nfit_saved = acpi_desc->nfit;
  2252. obj = buf.pointer;
  2253. if (obj->type == ACPI_TYPE_BUFFER) {
  2254. acpi_desc->nfit =
  2255. (struct acpi_nfit_header *)obj->buffer.pointer;
  2256. ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
  2257. if (ret) {
  2258. /* Merge failed, restore old nfit, and exit */
  2259. acpi_desc->nfit = nfit_saved;
  2260. dev_err(dev, "failed to merge updated NFIT\n");
  2261. }
  2262. } else {
  2263. /* Bad _FIT, restore old nfit */
  2264. dev_err(dev, "Invalid _FIT\n");
  2265. }
  2266. kfree(buf.pointer);
  2267. out_unlock:
  2268. device_unlock(dev);
  2269. }
  2270. static const struct acpi_device_id acpi_nfit_ids[] = {
  2271. { "ACPI0012", 0 },
  2272. { "", 0 },
  2273. };
  2274. MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
  2275. static struct acpi_driver acpi_nfit_driver = {
  2276. .name = KBUILD_MODNAME,
  2277. .ids = acpi_nfit_ids,
  2278. .ops = {
  2279. .add = acpi_nfit_add,
  2280. .remove = acpi_nfit_remove,
  2281. .notify = acpi_nfit_notify,
  2282. },
  2283. };
  2284. static __init int nfit_init(void)
  2285. {
  2286. BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
  2287. BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
  2288. BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
  2289. BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
  2290. BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
  2291. BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
  2292. BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
  2293. acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]);
  2294. acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]);
  2295. acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]);
  2296. acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]);
  2297. acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]);
  2298. acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]);
  2299. acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]);
  2300. acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
  2301. acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
  2302. acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
  2303. acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
  2304. acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
  2305. nfit_wq = create_singlethread_workqueue("nfit");
  2306. if (!nfit_wq)
  2307. return -ENOMEM;
  2308. return acpi_bus_register_driver(&acpi_nfit_driver);
  2309. }
  2310. static __exit void nfit_exit(void)
  2311. {
  2312. acpi_bus_unregister_driver(&acpi_nfit_driver);
  2313. destroy_workqueue(nfit_wq);
  2314. }
  2315. module_init(nfit_init);
  2316. module_exit(nfit_exit);
  2317. MODULE_LICENSE("GPL v2");
  2318. MODULE_AUTHOR("Intel Corporation");