/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sizes.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <nd-core.h>
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"

/*
 * Generate an NFIT table to describe the following topology:
 *
 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
 *
 *                    (a)                       (b)           DIMM   BLK-REGION
 *           +----------+--------------+----------+---------+
 * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
 * | imc0 +--+- - - - - region0 - - - -+----------+         +
 * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
 *    |      +----------+--------------v----------v         v
 * +--+---+                            |                     |
 * | cpu0 |                                   region1
 * +--+---+                            |                     |
 *    |      +-------------------------^----------^         ^
 * +--+---+  |               blk4.0               |  pm1.0  |    2      region4
 * | imc1 +--+-------------------------+----------+         +
 * +------+  |               blk5.0               |  pm1.0  |    3      region5
 *           +-------------------------+----------+-+-------+
 *
 * +--+---+
 * | cpu1 |
 * +--+---+   (Hotplug DIMM)
 *    |      +----------------------------------------------+
 * +--+---+  |                 blk6.0/pm7.0                 |    4      region6/7
 * | imc0 +--+----------------------------------------------+
 * +------+
 *
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket.  Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portions of dimm0 and dimm1 are interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 *    allocate from the bottom of a region.  The unallocated
 *    portion of REGION0 aliases with REGION2 and REGION3.  That
 *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
 *    "blk3.0") starting at the base of each DIMM to offset (a) in those
 *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
 *    names that can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0"; the rest is reclaimed in 4 BLK namespaces (one for each
 *    dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portions of dimm2 and dimm3 that do not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
 *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note that BLK namespaces need not be contiguous in DPA-space, and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) An NFIT table may describe a simple system-physical-address range
 *    with no BLK aliasing.  This type of region may optionally
 *    reference an NVDIMM.
 */
enum {
	NUM_PM = 3,
	NUM_DCR = 5,
	NUM_HINTS = 8,
	NUM_BDW = NUM_DCR,
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};
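
/*
 * Sizing note: with the constants above, NUM_SPA = 3 + 5 + 5 = 13
 * system-physical-address ranges and NUM_MEM = 5 + 5 + 2 + 4 = 16
 * region-mapping entries are accounted for when nfit_test0_alloc()
 * sizes the NFIT buffer.
 */
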
struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];
};
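
/*
 * The layout above matches the control registers advertised by the blk
 * dcr-descriptors below: an 8-byte command register (command_offset 0,
 * command_size 8) and a 4-byte status register (status_offset 8,
 * status_size 4), i.e. DCR_SIZE == 12, followed by the block aperture.
 */
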
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
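
/*
 * Worked example of the handle encoding: NFIT_DIMM_HANDLE(0, 0, 1, 0, 1)
 * packs node 0, socket 0, memory controller 1, channel 0, dimm 1 into
 * (1 << 8) | 1 == 0x101, i.e. handle[3] below.
 */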
static u32 handle[] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
};

static unsigned long dimm_fail_cmd_flags[NUM_DCR];

struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;
	void *nfit_buf;
	dma_addr_t nfit_dma;
	size_t nfit_size;
	int dcr_idx;
	int num_dcr;
	int num_pm;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
	int setup_hotplug;
	union acpi_object **_fit;
	dma_addr_t _fit_dma;
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;
		unsigned long deadline;
		spinlock_t lock;
	} ars_state;
	struct device *dimm_dev[NUM_DCR];
};

static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}

static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;
	nd_cmd->config_size = LABEL_SIZE;
	nd_cmd->max_xfer = SZ_4K;

	return 0;
}
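
/*
 * The two label-space handlers below copy to/from the in-memory label
 * area for a dimm.  Note the return value convention: a non-negative
 * return is the number of bytes of the command buffer left unused
 * (buf_len - header - transfer length), which the nvdimm bus core
 * treats as success since only negative returns are errors.
 */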
static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;

	nd_cmd->status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	rc = buf_len - sizeof(*nd_cmd) - len;

	return rc;
}

static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;

	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);

	return rc;
}

#define NFIT_TEST_ARS_RECORDS 4
#define NFIT_TEST_CLEAR_ERR_UNIT 256

static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
		+ NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;

	return 0;
}
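
/*
 * ARS (address range scrub) emulation: a scrub "completes" once the
 * deadline armed below has passed; until then the start and status
 * handlers report busy via *cmd_rc = -EBUSY.
 */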
/*
 * Initialize the ars_state to return an ars_result 1 second in the future with
 * a 4K error range in the middle of the requested address range.
 */
static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
{
	struct nd_cmd_ars_status *ars_status;
	struct nd_ars_record *ars_record;

	ars_state->deadline = jiffies + 1*HZ;
	ars_status = ars_state->ars_status;
	ars_status->status = 0;
	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
		+ sizeof(struct nd_ars_record);
	ars_status->address = addr;
	ars_status->length = len;
	ars_status->type = ND_ARS_PERSISTENT;
	ars_status->num_records = 1;
	ars_record = &ars_status->records[0];
	ars_record->handle = 0;
	ars_record->err_address = addr + len / 2;
	ars_record->length = SZ_4K;
}

static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
		int *cmd_rc)
{
	if (buf_len < sizeof(*ars_start))
		return -EINVAL;

	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		ars_start->status = NFIT_ARS_START_BUSY;
		*cmd_rc = -EBUSY;
	} else {
		ars_start->status = 0;
		ars_start->scrub_time = 1;
		post_ars_status(ars_state, ars_start->address,
				ars_start->length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);

	return 0;
}

static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
		int *cmd_rc)
{
	if (buf_len < ars_state->ars_status->out_length)
		return -EINVAL;

	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		memset(ars_status, 0, buf_len);
		ars_status->status = NFIT_ARS_STATUS_BUSY;
		ars_status->out_length = sizeof(*ars_status);
		*cmd_rc = -EBUSY;
	} else {
		memcpy(ars_status, ars_state->ars_status,
				ars_state->ars_status->out_length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);

	return 0;
}

static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
		unsigned int buf_len, int *cmd_rc)
{
	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;

	if (buf_len < sizeof(*clear_err))
		return -EINVAL;
	if ((clear_err->address & mask) || (clear_err->length & mask))
		return -EINVAL;

	/*
	 * Report 'all clear' success for all commands even though a new
	 * scrub will find errors again.  This is enough to have the
	 * error removed from the 'badblocks' tracking in the pmem
	 * driver.
	 */
	clear_err->status = 0;
	clear_err->cleared = clear_err->length;
	*cmd_rc = 0;

	return 0;
}

static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
{
	static const struct nd_smart_payload smart_data = {
		.flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
			| ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
			| ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
		.health = ND_SMART_NON_CRITICAL_HEALTH,
		.temperature = 23 * 16,
		.spares = 75,
		.alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
		.life_used = 5,
		.shutdown_state = 0,
		.vendor_size = 0,
	};

	if (buf_len < sizeof(*smart))
		return -EINVAL;
	memcpy(smart->data, &smart_data, sizeof(smart_data));

	return 0;
}

static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
		unsigned int buf_len)
{
	static const struct nd_smart_threshold_payload smart_t_data = {
		.alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
		.temperature = 40 * 16,
		.spares = 5,
	};

	if (buf_len < sizeof(*smart_t))
		return -EINVAL;
	memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));

	return 0;
}
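
/*
 * nfit_test_ctl() implements the bus 'ndctl' callback for the emulated
 * nvdimm bus.  Per-dimm commands are validated against the dimm's
 * cmd_mask and dsm_mask, and an ND_CMD_CALL envelope is unwrapped to its
 * embedded function number and payload before dispatch; bus-scope
 * commands (ARS, clear-error) take the else branch.  The
 * dimm_fail_cmd_flags[] bitmap lets a test force -EIO for selected
 * commands on a selected dimm.
 */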
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	int i, rc = 0, __cmd_rc;

	if (!cmd_rc)
		cmd_rc = &__cmd_rc;
	*cmd_rc = 0;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);

		if (!nfit_mem)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			struct nd_cmd_pkg *call_pkg = buf;

			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;
			func = call_pkg->nd_command;

			if (call_pkg->nd_family != nfit_mem->family)
				return -ENOTTY;
		}

		if (!test_bit(cmd, &cmd_mask)
				|| !test_bit(func, &nfit_mem->dsm_mask))
			return -ENOTTY;

		/* lookup label space for the given dimm */
		for (i = 0; i < ARRAY_SIZE(handle); i++)
			if (__to_nfit_memdev(nfit_mem)->device_handle ==
					handle[i])
				break;
		if (i >= ARRAY_SIZE(handle))
			return -ENXIO;

		if ((1 << func) & dimm_fail_cmd_flags[i])
			return -EIO;

		switch (func) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		case ND_CMD_SMART:
			rc = nfit_test_cmd_smart(buf, buf_len);
			break;
		case ND_CMD_SMART_THRESHOLD:
			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
			device_lock(&t->pdev.dev);
			__acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
			device_unlock(&t->pdev.dev);
			break;
		default:
			return -ENOTTY;
		}
	} else {
		struct ars_state *ars_state = &t->ars_state;

		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
			return -ENOTTY;

		switch (func) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_CLEAR_ERROR:
			rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}

static DEFINE_SPINLOCK(nfit_test_lock);
static struct nfit_test *instances[NUM_NFITS];

static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	vfree(nfit_res->buf);
	kfree(nfit_res);
}

static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
		void *buf)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
			GFP_KERNEL);
	int rc;

	if (!buf || !nfit_res)
		goto err;
	rc = devm_add_action(dev, release_nfit_res, nfit_res);
	if (rc)
		goto err;
	INIT_LIST_HEAD(&nfit_res->list);
	memset(buf, 0, size);
	nfit_res->dev = dev;
	nfit_res->buf = buf;
	nfit_res->res.start = *dma;
	nfit_res->res.end = *dma + size - 1;
	nfit_res->res.name = "NFIT";
	spin_lock_init(&nfit_res->lock);
	INIT_LIST_HEAD(&nfit_res->requests);
	spin_lock(&nfit_test_lock);
	list_add(&nfit_res->list, &t->resources);
	spin_unlock(&nfit_test_lock);

	return nfit_res->buf;
 err:
	if (buf)
		vfree(buf);
	kfree(nfit_res);
	return NULL;
}
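
/*
 * test_alloc() fakes "physical" addresses: the buffer is plain vmalloc()
 * memory and its virtual address doubles as the dma/SPA address that is
 * advertised in the NFIT.  nfit_test_lookup() can then resolve either
 * form of the address back to the backing nfit_test_resource.
 */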
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	void *buf = vmalloc(size);

	*dma = (unsigned long) buf;
	return __test_alloc(t, size, dma, buf);
}
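
/*
 * Walk every registered test instance and return the resource whose fake
 * SPA range (or whose backing vmalloc address range) contains @addr.
 * The nfit_test framework's mocked I/O routines rely on this lookup to
 * redirect accesses to the emulated buffers.
 */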
static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(instances); i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct nfit_test *t = instances[i];

		if (!t)
			continue;
		spin_lock(&nfit_test_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res.start && (addr < n->res.start
						+ resource_size(&n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
					&& (addr < (unsigned long) n->buf
						+ resource_size(&n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&nfit_test_lock);
		if (nfit_res)
			return nfit_res;
	}

	return NULL;
}

static int ars_state_init(struct device *dev, struct ars_state *ars_state)
{
	ars_state->ars_status = devm_kzalloc(dev,
			sizeof(struct nd_cmd_ars_status)
			+ sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
			GFP_KERNEL);
	if (!ars_state->ars_status)
		return -ENOMEM;
	spin_lock_init(&ars_state->lock);
	return 0;
}

static void put_dimms(void *data)
{
	struct device **dimm_dev = data;
	int i;

	for (i = 0; i < NUM_DCR; i++)
		if (dimm_dev[i])
			device_unregister(dimm_dev[i]);
}

static struct class *nfit_test_dimm;

static int dimm_name_to_id(struct device *dev)
{
	int dimm;

	if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1
			|| dimm >= NUM_DCR || dimm < 0)
		return -ENXIO;
	return dimm;
}

static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int dimm = dimm_name_to_id(dev);

	if (dimm < 0)
		return dimm;

	return sprintf(buf, "%#x", handle[dimm]);
}
DEVICE_ATTR_RO(handle);

static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int dimm = dimm_name_to_id(dev);

	if (dimm < 0)
		return dimm;

	return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
}

static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t size)
{
	int dimm = dimm_name_to_id(dev);
	unsigned long val;
	ssize_t rc;

	if (dimm < 0)
		return dimm;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	dimm_fail_cmd_flags[dimm] = val;
	return size;
}
static DEVICE_ATTR_RW(fail_cmd);
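
/*
 * fail_cmd takes a bitmask of command numbers to fail with -EIO for this
 * dimm (see the dimm_fail_cmd_flags check in nfit_test_ctl()).  Assuming
 * the standard ND_CMD_* numbering from <linux/ndctl.h>, for example,
 * writing 0x20 (1 << ND_CMD_GET_CONFIG_DATA) makes label reads on the
 * dimm fail until the attribute is written back to 0.
 */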
static struct attribute *nfit_test_dimm_attributes[] = {
	&dev_attr_fail_cmd.attr,
	&dev_attr_handle.attr,
	NULL,
};

static struct attribute_group nfit_test_dimm_attribute_group = {
	.attrs = nfit_test_dimm_attributes,
};

static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
	&nfit_test_dimm_attribute_group,
	NULL,
};

static int nfit_test0_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
			+ offsetof(struct acpi_nfit_control_region,
					window_size) * NUM_DCR
			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
			+ (sizeof(struct acpi_nfit_flush_address)
					+ sizeof(u64) * NUM_HINTS) * NUM_DCR;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
	if (!t->spa_set[2])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;

		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);

		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
					sizeof(u64) * NUM_HINTS),
				&t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}

	for (i = 0; i < t->num_dcr; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}

	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
	if (!t->_fit)
		return -ENOMEM;

	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
		return -ENOMEM;
	for (i = 0; i < NUM_DCR; i++) {
		t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
				&t->pdev.dev, 0, NULL,
				nfit_test_dimm_attribute_groups,
				"test_dimm%d", i);
		if (!t->dimm_dev[i])
			return -ENOMEM;
	}

	return ars_state_init(&t->pdev.dev, &t->ars_state);
}

static int nfit_test1_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
		+ sizeof(struct acpi_nfit_memory_map)
		+ offsetof(struct acpi_nfit_control_region, window_size);
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);
	}

	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	return ars_state_init(&t->pdev.dev, &t->ars_state);
}

static void dcr_common_init(struct acpi_nfit_control_region *dcr)
{
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->valid_fields = 1;
	dcr->manufacturing_location = 0xa;
	dcr->manufacturing_date = cpu_to_be16(2016);
}

static void nfit_test0_setup(struct nfit_test *t)
{
	const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
		+ (sizeof(u64) * NUM_HINTS);
	struct acpi_nfit_desc *acpi_desc;
	struct acpi_nfit_memory_map *memdev;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_data_region *bdw;
	struct acpi_nfit_flush_address *flush;
	unsigned int offset, i;

	/*
	 * spa0 (interleave first half of dimm0 and dimm1, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA0_SIZE;

	/*
	 * spa1 (interleave last half of the 4 DIMMS, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 1+1;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA1_SIZE;

	/* spa2 (dcr0) dimm0 */
	spa = nfit_buf + sizeof(*spa) * 2;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 2+1;
	spa->address = t->dcr_dma[0];
	spa->length = DCR_SIZE;

	/* spa3 (dcr1) dimm1 */
	spa = nfit_buf + sizeof(*spa) * 3;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 3+1;
	spa->address = t->dcr_dma[1];
	spa->length = DCR_SIZE;

	/* spa4 (dcr2) dimm2 */
	spa = nfit_buf + sizeof(*spa) * 4;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 4+1;
	spa->address = t->dcr_dma[2];
	spa->length = DCR_SIZE;

	/* spa5 (dcr3) dimm3 */
	spa = nfit_buf + sizeof(*spa) * 5;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 5+1;
	spa->address = t->dcr_dma[3];
	spa->length = DCR_SIZE;

	/* spa6 (bdw for dcr0) dimm0 */
	spa = nfit_buf + sizeof(*spa) * 6;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 6+1;
	spa->address = t->dimm_dma[0];
	spa->length = DIMM_SIZE;

	/* spa7 (bdw for dcr1) dimm1 */
	spa = nfit_buf + sizeof(*spa) * 7;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 7+1;
	spa->address = t->dimm_dma[1];
	spa->length = DIMM_SIZE;

	/* spa8 (bdw for dcr2) dimm2 */
	spa = nfit_buf + sizeof(*spa) * 8;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 8+1;
	spa->address = t->dimm_dma[2];
	spa->length = DIMM_SIZE;

	/* spa9 (bdw for dcr3) dimm3 */
	spa = nfit_buf + sizeof(*spa) * 9;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 9+1;
	spa->address = t->dimm_dma[3];
	spa->length = DIMM_SIZE;

	offset = sizeof(*spa) * 10;
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 4+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0];
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region1 (spa0, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 5+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region2 (spa1, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 4+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1];
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region3 (spa1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 5+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region4 (spa1, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 6+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region5 (spa1, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 7+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region6 (spa/dcr0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 2+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region7 (spa/dcr1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 3+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region8 (spa/dcr2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 4+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region9 (spa/dcr3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 5+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region10 (spa/bdw0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 6+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region11 (spa/bdw1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 7+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region12 (spa/bdw2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 8+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region13 (spa/bdw3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 9+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
	/* dcr-descriptor0: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor1: blk */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 1+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor2: blk */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 2+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor3: blk */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 3+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
	/* dcr-descriptor0: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 4+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;

	/* dcr-descriptor1: pmem */
	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 5+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;

	/* dcr-descriptor2: pmem */
	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
			window_size) * 2;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 6+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;

	/* dcr-descriptor3: pmem */
	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
			window_size) * 3;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 7+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;

	offset = offset + offsetof(struct acpi_nfit_control_region,
			window_size) * 4;

	/* bdw0 (spa/dcr0, dimm0) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 0+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw1 (spa/dcr1, dimm1) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 1+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw2 (spa/dcr2, dimm2) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 2+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw3 (spa/dcr3, dimm3) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 3+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
	/* flush0 (dimm0) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[0];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);

	/* flush1 (dimm1) */
	flush = nfit_buf + offset + flush_hint_size * 1;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[1];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);

	/* flush2 (dimm2) */
	flush = nfit_buf + offset + flush_hint_size * 2;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[2];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);

	/* flush3 (dimm3) */
	flush = nfit_buf + offset + flush_hint_size * 3;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[3];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);

	if (t->setup_hotplug) {
		offset = offset + flush_hint_size * 4;
		/* dcr-descriptor4: blk */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = sizeof(struct acpi_nfit_control_region);
		dcr->region_index = 8+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BLK;
		dcr->windows = 1;
		dcr->window_size = DCR_SIZE;
		dcr->command_offset = 0;
		dcr->command_size = 8;
		dcr->status_offset = 8;
		dcr->status_size = 4;

		offset = offset + sizeof(struct acpi_nfit_control_region);
		/* dcr-descriptor4: pmem */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = offsetof(struct acpi_nfit_control_region,
				window_size);
		dcr->region_index = 9+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BYTEN;
		dcr->windows = 0;

		offset = offset + offsetof(struct acpi_nfit_control_region,
				window_size);
		/* bdw4 (spa/dcr4, dimm4) */
		bdw = nfit_buf + offset;
		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
		bdw->header.length = sizeof(struct acpi_nfit_data_region);
		bdw->region_index = 8+1;
		bdw->windows = 1;
		bdw->offset = 0;
		bdw->size = BDW_SIZE;
		bdw->capacity = DIMM_SIZE;
		bdw->start_address = 0;

		offset = offset + sizeof(struct acpi_nfit_data_region);
		/* spa10 (dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
		spa->range_index = 10+1;
		spa->address = t->dcr_dma[4];
		spa->length = DCR_SIZE;

		/*
		 * spa11 (single-dimm interleave for hotplug, note storage
		 * does not actually alias the related block-data-window
		 * regions)
		 */
		spa = nfit_buf + offset + sizeof(*spa);
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
		spa->range_index = 11+1;
		spa->address = t->spa_set_dma[2];
		spa->length = SPA0_SIZE;

		/* spa12 (bdw for dcr4) dimm4 */
		spa = nfit_buf + offset + sizeof(*spa) * 2;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
		spa->range_index = 12+1;
		spa->address = t->dimm_dma[4];
		spa->length = DIMM_SIZE;

		offset = offset + sizeof(*spa) * 3;
		/* mem-region14 (spa/dcr4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 10+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;

		/* mem-region15 (spa0, dimm4) */
		memdev = nfit_buf + offset +
				sizeof(struct acpi_nfit_memory_map);
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 11+1;
		memdev->region_index = 9+1;
		memdev->region_size = SPA0_SIZE;
		memdev->region_offset = t->spa_set_dma[2];
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;

		/* mem-region16 (spa/bdw4, dimm4) */
		memdev = nfit_buf + offset +
				sizeof(struct acpi_nfit_memory_map) * 2;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 12+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;

		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
		/* flush4 (dimm4) */
		flush = nfit_buf + offset;
		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
		flush->header.length = flush_hint_size;
		flush->device_handle = handle[4];
		flush->hint_count = NUM_HINTS;
		for (i = 0; i < NUM_HINTS; i++)
			flush->hint_address[i] = t->flush_dma[4]
				+ i * sizeof(u64);
	}

	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
}

static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;

	/* virtual cd region */
	spa = nfit_buf + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;

	offset += sizeof(*spa) * 2;

	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;

	offset += sizeof(*memdev);

	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[5];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;

	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
}
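/*
 * Simulated BLK-aperture I/O: instead of programming command/address
 * registers, reads and writes go straight to the backing test buffer
 * mapped at mmio[BDW].
 */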
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);

		/* give us some coverage of the mmio_flush_range() API */
		mmio_flush_range(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}
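/*
 * Stub ACPI _DSM evaluation used by nfit_ctl_test(): any handle other
 * than the local nfit_ctl_handle is rejected, otherwise the canned
 * 'result' object installed by setup_result() is returned.
 */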
static unsigned long nfit_ctl_handle;

union acpi_object *result;

static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
		const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4)
{
	if (handle != &nfit_ctl_handle)
		return ERR_PTR(-ENXIO);

	return result;
}
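/*
 * Stage 'buf' as the payload the stub _DSM will hand back, then clear
 * the caller's copy so the test can verify that acpi_nfit_ctl()
 * actually copies the returned data back out.
 */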
static int setup_result(void *buf, size_t size)
{
	result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
	if (!result)
		return -ENOMEM;
	result->package.type = ACPI_TYPE_BUFFER;
	result->buffer.pointer = (void *) (result + 1);
	result->buffer.length = size;
	memcpy(result->buffer.pointer, buf, size);
	memset(buf, 0, size);
	return 0;
}
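/*
 * Exercise acpi_nfit_ctl() command marshaling directly against the stub
 * _DSM: fixed-size commands, variable-length ars_status payloads, and
 * translation of extended status codes into errors.
 */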
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	union {
		struct nd_cmd_get_config_size cfg_size;
		struct nd_cmd_ars_status ars_stat;
		struct nd_cmd_ars_cap ars_cap;
		char buf[sizeof(struct nd_cmd_ars_status)
			+ sizeof(struct nd_ars_record)];
	} cmds;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
		},
		.dev = &adev->dev,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};

	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
			|| cmds.cfg_size.config_size != SZ_128K
			|| cmds.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmds.ars_cap);
	cmds.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmds.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size - 4,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}
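/*
 * Instantiate a simulated NFIT: allocate the per-dimm and per-range
 * backing resources, build the table via the instance's setup routine,
 * and hand it to acpi_nfit_init() as if it had come from firmware.
 */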
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
		rc = nfit_ctl_test(&pdev->dev);
		if (rc)
			return rc;
	}

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = NULL;
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = nfit_test_ctl;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
			nfit_test->nfit_size);
	if (rc)
		return rc;
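	/* only nfit_test.0 exercises the hotplug and notification flow below */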
	if (nfit_test->setup != nfit_test0_setup)
		return 0;
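	/*
	 * Rebuild the table with the hot-plugged dimm included and inject it
	 * via a simulated 0x80 (table update) notification.
	 */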
	nfit_test->setup_hotplug = 1;
	nfit_test->setup(nfit_test);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = nfit_test->nfit_size;
	obj->buffer.pointer = nfit_test->nfit_buf;
	*(nfit_test->_fit) = obj;
	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);

	/* associate dimm devices with nfit_mem data for notification testing */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		int i;

		for (i = 0; i < NUM_DCR; i++)
			if (nfit_handle == handle[i])
				dev_set_drvdata(nfit_test->dimm_dev[i],
						nfit_mem);
	}
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
static int nfit_test_remove(struct platform_device *pdev)
{
	return 0;
}

static void nfit_test_release(struct device *dev)
{
	struct nfit_test *nfit_test = to_nfit_test(dev);

	kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};
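/*
 * Module init: install the resource-lookup and _DSM overrides, create the
 * nfit_test_dimm class, then allocate and register one platform device per
 * simulated NFIT instance before registering the driver.
 */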
static __init int nfit_test_init(void)
{
	int rc, i;

	nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);

	nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
	if (IS_ERR(nfit_test_dimm)) {
		rc = PTR_ERR(nfit_test_dimm);
		goto err_register;
	}

	for (i = 0; i < NUM_NFITS; i++) {
		struct nfit_test *nfit_test;
		struct platform_device *pdev;

		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
		if (!nfit_test) {
			rc = -ENOMEM;
			goto err_register;
		}
		INIT_LIST_HEAD(&nfit_test->resources);
		switch (i) {
		case 0:
			nfit_test->num_pm = NUM_PM;
			nfit_test->dcr_idx = 0;
			nfit_test->num_dcr = NUM_DCR;
			nfit_test->alloc = nfit_test0_alloc;
			nfit_test->setup = nfit_test0_setup;
			break;
		case 1:
			nfit_test->num_pm = 1;
			nfit_test->dcr_idx = NUM_DCR;
			nfit_test->num_dcr = 1;
			nfit_test->alloc = nfit_test1_alloc;
			nfit_test->setup = nfit_test1_setup;
			break;
		default:
			rc = -EINVAL;
			goto err_register;
		}
		pdev = &nfit_test->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = nfit_test_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err_register;

		instances[i] = nfit_test;
	}

	rc = platform_driver_register(&nfit_test_driver);
	if (rc)
		goto err_register;
	return 0;

 err_register:
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	return rc;
}
static __exit void nfit_test_exit(void)
{
	int i;

	platform_driver_unregister(&nfit_test_driver);
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	class_destroy(nfit_test_dimm);
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");