nfit.c

  1. /*
  2. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/platform_device.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/workqueue.h>
  17. #include <linux/libnvdimm.h>
  18. #include <linux/vmalloc.h>
  19. #include <linux/device.h>
  20. #include <linux/module.h>
  21. #include <linux/mutex.h>
  22. #include <linux/ndctl.h>
  23. #include <linux/sizes.h>
  24. #include <linux/list.h>
  25. #include <linux/slab.h>
  26. #include <nd-core.h>
  27. #include <nfit.h>
  28. #include <nd.h>
  29. #include "nfit_test.h"
  30. /*
  31. * Generate an NFIT table to describe the following topology:
  32. *
  33. * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
  34. *
  35. * (a) (b) DIMM BLK-REGION
  36. * +----------+--------------+----------+---------+
  37. * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2
  38. * | imc0 +--+- - - - - region0 - - - -+----------+ +
  39. * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3
  40. * | +----------+--------------v----------v v
  41. * +--+---+ | |
  42. * | cpu0 | region1
  43. * +--+---+ | |
  44. * | +-------------------------^----------^ ^
  45. * +--+---+ | blk4.0 | pm1.0 | 2 region4
  46. * | imc1 +--+-------------------------+----------+ +
  47. * +------+ | blk5.0 | pm1.0 | 3 region5
  48. * +-------------------------+----------+-+-------+
  49. *
  50. * +--+---+
  51. * | cpu1 |
  52. * +--+---+ (Hotplug DIMM)
  53. * | +----------------------------------------------+
  54. * +--+---+ | blk6.0/pm7.0 | 4 region6/7
  55. * | imc0 +--+----------------------------------------------+
  56. * +------+
  57. *
  58. *
  59. * *) In this layout we have four dimms and two memory controllers in one
  60. * socket. Each unique interface (BLK or PMEM) to DPA space
  61. * is identified by a region device with a dynamically assigned id.
  62. *
  63. * *) The first portions of dimm0 and dimm1 are interleaved as REGION0.
  64. * A single PMEM namespace "pm0.0" is created using half of the
  65. * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespaces
  66. * allocate from the bottom of a region. The unallocated
  67. * portion of REGION0 aliases with REGION2 and REGION3. That
  68. * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
  69. * "blk3.0") starting at the base of each DIMM to offset (a) in those
  70. * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
  71. * names that can be assigned to a namespace.
  72. *
  73. * *) In the last portion of dimm0 and dimm1 we have an interleaved
  74. * SPA range, REGION1, that spans those two dimms as well as dimm2
  75. * and dimm3. Some of REGION1 is allocated to a PMEM namespace named
  76. * "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
  77. * dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
  78. * "blk5.0".
  79. *
  80. * *) The portions of dimm2 and dimm3 that do not participate in the
  81. * REGION1 interleaved SPA range (i.e. the DPA addresses below offset
  82. * (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
  83. * Note, that BLK namespaces need not be contiguous in DPA-space, and
  84. * can consume aliased capacity from multiple interleave sets.
  85. *
  86. * BUS1: Legacy NVDIMM (single contiguous range)
  87. *
  88. * region2
  89. * +---------------------+
  90. * |---------------------|
  91. * || pm2.0 ||
  92. * |---------------------|
  93. * +---------------------+
  94. *
  95. * *) An NFIT table may describe a simple system-physical-address range
  96. * with no BLK aliasing. This type of region may optionally
  97. * reference an NVDIMM.
  98. */
  99. enum {
  100. NUM_PM = 3,
  101. NUM_DCR = 5,
  102. NUM_HINTS = 8,
  103. NUM_BDW = NUM_DCR,
  104. NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
  105. NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
  106. DIMM_SIZE = SZ_32M,
  107. LABEL_SIZE = SZ_128K,
  108. SPA_VCD_SIZE = SZ_4M,
  109. SPA0_SIZE = DIMM_SIZE,
  110. SPA1_SIZE = DIMM_SIZE*2,
  111. SPA2_SIZE = DIMM_SIZE,
  112. BDW_SIZE = 64 << 8,
  113. DCR_SIZE = 12,
  114. NUM_NFITS = 2, /* permit testing multiple NFITs per system */
  115. };
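/*
 * Rough mapping from the constants above to the topology diagram at the top
 * of the file (a worked example derived from this test's own setup code, not
 * from the ACPI spec):
 *
 *   DIMM_SIZE  = 32M per simulated DIMM backing store
 *   SPA0_SIZE  = 32M -> region0, interleaved across dimm0/dimm1, of which
 *                pm0.0 consumes half (16M) per the description above
 *   SPA1_SIZE  = 64M -> region1, interleaved across all four dimms, i.e.
 *                SPA1_SIZE/4 = 16M contributed by each DIMM
 *   BDW_SIZE   = 64 << 8 = 16K block-data-window aperture per DIMM
 *   DCR_SIZE   = 12 bytes: an 8-byte command register + a 4-byte status
 *   NUM_SPA    = 3 + 5 + 5 = 13 SPA ranges, and NUM_MEM = 5 + 5 + 2 + 4 = 16
 *                memory-map (memdev) entries for nfit_test0
 */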
  116. struct nfit_test_dcr {
  117. __le64 bdw_addr;
  118. __le32 bdw_status;
  119. __u8 aperature[BDW_SIZE];
  120. };
  121. #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
  122. (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
  123. | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
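/*
 * Handle packing, worked example: node occupies bits [27:16], socket [15:12],
 * imc [11:8], chan [7:4] and dimm [3:0], so NFIT_DIMM_HANDLE(0, 0, 1, 0, 1)
 * (handle[3] below) evaluates to 0x101, and NFIT_DIMM_HANDLE(1, 0, 0, 0, 0)
 * (handle[5]) to 0x10000.
 */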
  124. static u32 handle[] = {
  125. [0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
  126. [1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
  127. [2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
  128. [3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
  129. [4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
  130. [5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
  131. [6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
  132. };
  133. static unsigned long dimm_fail_cmd_flags[NUM_DCR];
  134. struct nfit_test {
  135. struct acpi_nfit_desc acpi_desc;
  136. struct platform_device pdev;
  137. struct list_head resources;
  138. void *nfit_buf;
  139. dma_addr_t nfit_dma;
  140. size_t nfit_size;
  141. int dcr_idx;
  142. int num_dcr;
  143. int num_pm;
  144. void **dimm;
  145. dma_addr_t *dimm_dma;
  146. void **flush;
  147. dma_addr_t *flush_dma;
  148. void **label;
  149. dma_addr_t *label_dma;
  150. void **spa_set;
  151. dma_addr_t *spa_set_dma;
  152. struct nfit_test_dcr **dcr;
  153. dma_addr_t *dcr_dma;
  154. int (*alloc)(struct nfit_test *t);
  155. void (*setup)(struct nfit_test *t);
  156. int setup_hotplug;
  157. union acpi_object **_fit;
  158. dma_addr_t _fit_dma;
  159. struct ars_state {
  160. struct nd_cmd_ars_status *ars_status;
  161. unsigned long deadline;
  162. spinlock_t lock;
  163. } ars_state;
  164. struct device *dimm_dev[NUM_DCR];
  165. };
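/*
 * Note on indexing: dcr_idx is presumably set by the probe code (not shown
 * in this listing) to the offset of this bus instance's first DIMM within the
 * global handle[] / dimm_fail_cmd_flags[] tables, which is why nfit_test_ctl()
 * below indexes the per-instance arrays as t->label[i - t->dcr_idx].
 */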
  166. static struct nfit_test *to_nfit_test(struct device *dev)
  167. {
  168. struct platform_device *pdev = to_platform_device(dev);
  169. return container_of(pdev, struct nfit_test, pdev);
  170. }
  171. static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
  172. unsigned int buf_len)
  173. {
  174. if (buf_len < sizeof(*nd_cmd))
  175. return -EINVAL;
  176. nd_cmd->status = 0;
  177. nd_cmd->config_size = LABEL_SIZE;
  178. nd_cmd->max_xfer = SZ_4K;
  179. return 0;
  180. }
  181. static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
  182. *nd_cmd, unsigned int buf_len, void *label)
  183. {
  184. unsigned int len, offset = nd_cmd->in_offset;
  185. int rc;
  186. if (buf_len < sizeof(*nd_cmd))
  187. return -EINVAL;
  188. if (offset >= LABEL_SIZE)
  189. return -EINVAL;
  190. if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
  191. return -EINVAL;
  192. nd_cmd->status = 0;
  193. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  194. memcpy(nd_cmd->out_buf, label + offset, len);
  195. rc = buf_len - sizeof(*nd_cmd) - len;
  196. return rc;
  197. }
  198. static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
  199. unsigned int buf_len, void *label)
  200. {
  201. unsigned int len, offset = nd_cmd->in_offset;
  202. u32 *status;
  203. int rc;
  204. if (buf_len < sizeof(*nd_cmd))
  205. return -EINVAL;
  206. if (offset >= LABEL_SIZE)
  207. return -EINVAL;
  208. if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
  209. return -EINVAL;
  210. status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
  211. *status = 0;
  212. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  213. memcpy(label + offset, nd_cmd->in_buf, len);
  214. rc = buf_len - sizeof(*nd_cmd) - (len + 4);
  215. return rc;
  216. }
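/*
 * Buffer layout assumed by the two label handlers above (a sketch, sizes in
 * bytes):
 *
 *   get: [ nd_cmd_get_config_data_hdr ][ out_buf: in_length bytes ]
 *   set: [ nd_cmd_set_config_hdr ][ in_buf: in_length bytes ][ u32 status ]
 *
 * The trailing u32 in the set layout is what 'status' points at above, and
 * both handlers return the count of buffer bytes left unconsumed.
 */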
  217. #define NFIT_TEST_ARS_RECORDS 4
  218. #define NFIT_TEST_CLEAR_ERR_UNIT 256
  219. static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
  220. unsigned int buf_len)
  221. {
  222. if (buf_len < sizeof(*nd_cmd))
  223. return -EINVAL;
  224. nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
  225. + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
  226. nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
  227. nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
  228. return 0;
  229. }
  230. /*
  231. * Initialize the ars_state to return an ars_result 1 second in the future with
  232. * a 4K error range in the middle of the requested address range.
  233. */
  234. static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
  235. {
  236. struct nd_cmd_ars_status *ars_status;
  237. struct nd_ars_record *ars_record;
  238. ars_state->deadline = jiffies + 1*HZ;
  239. ars_status = ars_state->ars_status;
  240. ars_status->status = 0;
  241. ars_status->out_length = sizeof(struct nd_cmd_ars_status)
  242. + sizeof(struct nd_ars_record);
  243. ars_status->address = addr;
  244. ars_status->length = len;
  245. ars_status->type = ND_ARS_PERSISTENT;
  246. ars_status->num_records = 1;
  247. ars_record = &ars_status->records[0];
  248. ars_record->handle = 0;
  249. ars_record->err_address = addr + len / 2;
  250. ars_record->length = SZ_4K;
  251. }
  252. static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
  253. struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
  254. int *cmd_rc)
  255. {
  256. if (buf_len < sizeof(*ars_start))
  257. return -EINVAL;
  258. spin_lock(&ars_state->lock);
  259. if (time_before(jiffies, ars_state->deadline)) {
  260. ars_start->status = NFIT_ARS_START_BUSY;
  261. *cmd_rc = -EBUSY;
  262. } else {
  263. ars_start->status = 0;
  264. ars_start->scrub_time = 1;
  265. post_ars_status(ars_state, ars_start->address,
  266. ars_start->length);
  267. *cmd_rc = 0;
  268. }
  269. spin_unlock(&ars_state->lock);
  270. return 0;
  271. }
  272. static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
  273. struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
  274. int *cmd_rc)
  275. {
  276. if (buf_len < ars_state->ars_status->out_length)
  277. return -EINVAL;
  278. spin_lock(&ars_state->lock);
  279. if (time_before(jiffies, ars_state->deadline)) {
  280. memset(ars_status, 0, buf_len);
  281. ars_status->status = NFIT_ARS_STATUS_BUSY;
  282. ars_status->out_length = sizeof(*ars_status);
  283. *cmd_rc = -EBUSY;
  284. } else {
  285. memcpy(ars_status, ars_state->ars_status,
  286. ars_state->ars_status->out_length);
  287. *cmd_rc = 0;
  288. }
  289. spin_unlock(&ars_state->lock);
  290. return 0;
  291. }
  292. static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
  293. unsigned int buf_len, int *cmd_rc)
  294. {
  295. const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
  296. if (buf_len < sizeof(*clear_err))
  297. return -EINVAL;
  298. if ((clear_err->address & mask) || (clear_err->length & mask))
  299. return -EINVAL;
  300. /*
  301. * Report 'all clear' success for all commands even though a new
  302. * scrub will find errors again. This is enough to have the
  303. * error removed from the 'badblocks' tracking in the pmem
  304. * driver.
  305. */
  306. clear_err->status = 0;
  307. clear_err->cleared = clear_err->length;
  308. *cmd_rc = 0;
  309. return 0;
  310. }
  311. static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
  312. {
  313. static const struct nd_smart_payload smart_data = {
  314. .flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
  315. | ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
  316. | ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
  317. .health = ND_SMART_NON_CRITICAL_HEALTH,
  318. .temperature = 23 * 16,
  319. .spares = 75,
  320. .alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
  321. .life_used = 5,
  322. .shutdown_state = 0,
  323. .vendor_size = 0,
  324. };
  325. if (buf_len < sizeof(*smart))
  326. return -EINVAL;
  327. memcpy(smart->data, &smart_data, sizeof(smart_data));
  328. return 0;
  329. }
  330. static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
  331. unsigned int buf_len)
  332. {
  333. static const struct nd_smart_threshold_payload smart_t_data = {
  334. .alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
  335. .temperature = 40 * 16,
  336. .spares = 5,
  337. };
  338. if (buf_len < sizeof(*smart_t))
  339. return -EINVAL;
  340. memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
  341. return 0;
  342. }
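/*
 * The canned SMART data above apparently encodes temperatures in 1/16 degree
 * Celsius steps: a 23 C (23 * 16) media reading checked against a 40 C
 * (40 * 16) alarm threshold.
 */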
  343. static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
  344. struct nvdimm *nvdimm, unsigned int cmd, void *buf,
  345. unsigned int buf_len, int *cmd_rc)
  346. {
  347. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  348. struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
  349. unsigned int func = cmd;
  350. int i, rc = 0, __cmd_rc;
  351. if (!cmd_rc)
  352. cmd_rc = &__cmd_rc;
  353. *cmd_rc = 0;
  354. if (nvdimm) {
  355. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  356. unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
  357. if (!nfit_mem)
  358. return -ENOTTY;
  359. if (cmd == ND_CMD_CALL) {
  360. struct nd_cmd_pkg *call_pkg = buf;
  361. buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
  362. buf = (void *) call_pkg->nd_payload;
  363. func = call_pkg->nd_command;
  364. if (call_pkg->nd_family != nfit_mem->family)
  365. return -ENOTTY;
  366. }
  367. if (!test_bit(cmd, &cmd_mask)
  368. || !test_bit(func, &nfit_mem->dsm_mask))
  369. return -ENOTTY;
  370. /* lookup label space for the given dimm */
  371. for (i = 0; i < ARRAY_SIZE(handle); i++)
  372. if (__to_nfit_memdev(nfit_mem)->device_handle ==
  373. handle[i])
  374. break;
  375. if (i >= ARRAY_SIZE(handle))
  376. return -ENXIO;
  377. if ((1 << func) & dimm_fail_cmd_flags[i])
  378. return -EIO;
  379. switch (func) {
  380. case ND_CMD_GET_CONFIG_SIZE:
  381. rc = nfit_test_cmd_get_config_size(buf, buf_len);
  382. break;
  383. case ND_CMD_GET_CONFIG_DATA:
  384. rc = nfit_test_cmd_get_config_data(buf, buf_len,
  385. t->label[i - t->dcr_idx]);
  386. break;
  387. case ND_CMD_SET_CONFIG_DATA:
  388. rc = nfit_test_cmd_set_config_data(buf, buf_len,
  389. t->label[i - t->dcr_idx]);
  390. break;
  391. case ND_CMD_SMART:
  392. rc = nfit_test_cmd_smart(buf, buf_len);
  393. break;
  394. case ND_CMD_SMART_THRESHOLD:
  395. rc = nfit_test_cmd_smart_threshold(buf, buf_len);
  396. device_lock(&t->pdev.dev);
  397. __acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
  398. device_unlock(&t->pdev.dev);
  399. break;
  400. default:
  401. return -ENOTTY;
  402. }
  403. } else {
  404. struct ars_state *ars_state = &t->ars_state;
  405. if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
  406. return -ENOTTY;
  407. switch (func) {
  408. case ND_CMD_ARS_CAP:
  409. rc = nfit_test_cmd_ars_cap(buf, buf_len);
  410. break;
  411. case ND_CMD_ARS_START:
  412. rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
  413. cmd_rc);
  414. break;
  415. case ND_CMD_ARS_STATUS:
  416. rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
  417. cmd_rc);
  418. break;
  419. case ND_CMD_CLEAR_ERROR:
  420. rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
  421. break;
  422. default:
  423. return -ENOTTY;
  424. }
  425. }
  426. return rc;
  427. }
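/*
 * Illustrative sketch only: one way the bus-scoped branch of nfit_test_ctl()
 * could be driven directly.  example_ars_cap() is a hypothetical helper that
 * is not used elsewhere in this file, and it assumes the descriptor's
 * cmd_mask has already been populated (normally done during probe).
 */
static int __maybe_unused example_ars_cap(struct nfit_test *t, u64 addr,
		u64 len)
{
	struct nd_cmd_ars_cap cap = {
		.address = addr,
		.length = len,
	};
	int cmd_rc;

	/* a NULL nvdimm selects the bus-scoped command path above */
	return nfit_test_ctl(&t->acpi_desc.nd_desc, NULL, ND_CMD_ARS_CAP,
			&cap, sizeof(cap), &cmd_rc);
}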
  428. static DEFINE_SPINLOCK(nfit_test_lock);
  429. static struct nfit_test *instances[NUM_NFITS];
  430. static void release_nfit_res(void *data)
  431. {
  432. struct nfit_test_resource *nfit_res = data;
  433. spin_lock(&nfit_test_lock);
  434. list_del(&nfit_res->list);
  435. spin_unlock(&nfit_test_lock);
  436. vfree(nfit_res->buf);
  437. kfree(nfit_res);
  438. }
  439. static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
  440. void *buf)
  441. {
  442. struct device *dev = &t->pdev.dev;
  443. struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
  444. GFP_KERNEL);
  445. int rc;
  446. if (!buf || !nfit_res)
  447. goto err;
  448. rc = devm_add_action(dev, release_nfit_res, nfit_res);
  449. if (rc)
  450. goto err;
  451. INIT_LIST_HEAD(&nfit_res->list);
  452. memset(buf, 0, size);
  453. nfit_res->dev = dev;
  454. nfit_res->buf = buf;
  455. nfit_res->res.start = *dma;
  456. nfit_res->res.end = *dma + size - 1;
  457. nfit_res->res.name = "NFIT";
  458. spin_lock_init(&nfit_res->lock);
  459. INIT_LIST_HEAD(&nfit_res->requests);
  460. spin_lock(&nfit_test_lock);
  461. list_add(&nfit_res->list, &t->resources);
  462. spin_unlock(&nfit_test_lock);
  463. return nfit_res->buf;
  464. err:
  465. if (buf)
  466. vfree(buf);
  467. kfree(nfit_res);
  468. return NULL;
  469. }
  470. static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
  471. {
  472. void *buf = vmalloc(size);
  473. *dma = (unsigned long) buf;
  474. return __test_alloc(t, size, dma, buf);
  475. }
  476. static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
  477. {
  478. int i;
  479. for (i = 0; i < ARRAY_SIZE(instances); i++) {
  480. struct nfit_test_resource *n, *nfit_res = NULL;
  481. struct nfit_test *t = instances[i];
  482. if (!t)
  483. continue;
  484. spin_lock(&nfit_test_lock);
  485. list_for_each_entry(n, &t->resources, list) {
  486. if (addr >= n->res.start && (addr < n->res.start
  487. + resource_size(&n->res))) {
  488. nfit_res = n;
  489. break;
  490. } else if (addr >= (unsigned long) n->buf
  491. && (addr < (unsigned long) n->buf
  492. + resource_size(&n->res))) {
  493. nfit_res = n;
  494. break;
  495. }
  496. }
  497. spin_unlock(&nfit_test_lock);
  498. if (nfit_res)
  499. return nfit_res;
  500. }
  501. return NULL;
  502. }
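/*
 * test_alloc() advertises each vmalloc()'d buffer's virtual address as its
 * fake "DMA"/physical address, which is why nfit_test_lookup() above matches
 * a request either against the published resource range or against the raw
 * buffer pointer.  The wrapped ioremap()/request_region() helpers declared in
 * nfit_test.h are presumably what funnel platform accesses through this
 * lookup; that wiring lives outside this file.
 */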
  503. static int ars_state_init(struct device *dev, struct ars_state *ars_state)
  504. {
  505. ars_state->ars_status = devm_kzalloc(dev,
  506. sizeof(struct nd_cmd_ars_status)
  507. + sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
  508. GFP_KERNEL);
  509. if (!ars_state->ars_status)
  510. return -ENOMEM;
  511. spin_lock_init(&ars_state->lock);
  512. return 0;
  513. }
  514. static void put_dimms(void *data)
  515. {
  516. struct device **dimm_dev = data;
  517. int i;
  518. for (i = 0; i < NUM_DCR; i++)
  519. if (dimm_dev[i])
  520. device_unregister(dimm_dev[i]);
  521. }
  522. static struct class *nfit_test_dimm;
  523. static int dimm_name_to_id(struct device *dev)
  524. {
  525. int dimm;
  526. if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1
  527. || dimm >= NUM_DCR || dimm < 0)
  528. return -ENXIO;
  529. return dimm;
  530. }
  531. static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
  532. char *buf)
  533. {
  534. int dimm = dimm_name_to_id(dev);
  535. if (dimm < 0)
  536. return dimm;
  537. return sprintf(buf, "%#x", handle[dimm]);
  538. }
  539. DEVICE_ATTR_RO(handle);
  540. static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
  541. char *buf)
  542. {
  543. int dimm = dimm_name_to_id(dev);
  544. if (dimm < 0)
  545. return dimm;
  546. return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
  547. }
  548. static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
  549. const char *buf, size_t size)
  550. {
  551. int dimm = dimm_name_to_id(dev);
  552. unsigned long val;
  553. ssize_t rc;
  554. if (dimm < 0)
  555. return dimm;
  556. rc = kstrtol(buf, 0, &val);
  557. if (rc)
  558. return rc;
  559. dimm_fail_cmd_flags[dimm] = val;
  560. return size;
  561. }
  562. static DEVICE_ATTR_RW(fail_cmd);
  563. static struct attribute *nfit_test_dimm_attributes[] = {
  564. &dev_attr_fail_cmd.attr,
  565. &dev_attr_handle.attr,
  566. NULL,
  567. };
  568. static struct attribute_group nfit_test_dimm_attribute_group = {
  569. .attrs = nfit_test_dimm_attributes,
  570. };
  571. static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
  572. &nfit_test_dimm_attribute_group,
  573. NULL,
  574. };
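/*
 * Sysfs usage sketch (the path assumes the class ends up registered as
 * "nfit_test_dimm", matching the variable above):
 *
 *   # fail ND_CMD_GET_CONFIG_DATA (bit 5 in the ndctl.h numbering) with -EIO
 *   echo 0x20 > /sys/class/nfit_test_dimm/test_dimm0/fail_cmd
 *
 * nfit_test_ctl() tests (1 << func) against dimm_fail_cmd_flags[], so the
 * value written is a bitmask of command numbers to fail for that DIMM.
 */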
  575. static int nfit_test0_alloc(struct nfit_test *t)
  576. {
  577. size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
  578. + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
  579. + sizeof(struct acpi_nfit_control_region) * NUM_DCR
  580. + offsetof(struct acpi_nfit_control_region,
  581. window_size) * NUM_DCR
  582. + sizeof(struct acpi_nfit_data_region) * NUM_BDW
  583. + (sizeof(struct acpi_nfit_flush_address)
  584. + sizeof(u64) * NUM_HINTS) * NUM_DCR;
  585. int i;
  586. t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
  587. if (!t->nfit_buf)
  588. return -ENOMEM;
  589. t->nfit_size = nfit_size;
  590. t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
  591. if (!t->spa_set[0])
  592. return -ENOMEM;
  593. t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
  594. if (!t->spa_set[1])
  595. return -ENOMEM;
  596. t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
  597. if (!t->spa_set[2])
  598. return -ENOMEM;
  599. for (i = 0; i < t->num_dcr; i++) {
  600. t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
  601. if (!t->dimm[i])
  602. return -ENOMEM;
  603. t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
  604. if (!t->label[i])
  605. return -ENOMEM;
  606. sprintf(t->label[i], "label%d", i);
  607. t->flush[i] = test_alloc(t, max(PAGE_SIZE,
  608. sizeof(u64) * NUM_HINTS),
  609. &t->flush_dma[i]);
  610. if (!t->flush[i])
  611. return -ENOMEM;
  612. }
  613. for (i = 0; i < t->num_dcr; i++) {
  614. t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
  615. if (!t->dcr[i])
  616. return -ENOMEM;
  617. }
  618. t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
  619. if (!t->_fit)
  620. return -ENOMEM;
  621. if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
  622. return -ENOMEM;
  623. for (i = 0; i < NUM_DCR; i++) {
  624. t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
  625. &t->pdev.dev, 0, NULL,
  626. nfit_test_dimm_attribute_groups,
  627. "test_dimm%d", i);
  628. if (!t->dimm_dev[i])
  629. return -ENOMEM;
  630. }
  631. return ars_state_init(&t->pdev.dev, &t->ars_state);
  632. }
  633. static int nfit_test1_alloc(struct nfit_test *t)
  634. {
  635. size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
  636. + sizeof(struct acpi_nfit_memory_map) * 2
  637. + offsetof(struct acpi_nfit_control_region, window_size) * 2;
  638. int i;
  639. t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
  640. if (!t->nfit_buf)
  641. return -ENOMEM;
  642. t->nfit_size = nfit_size;
  643. t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
  644. if (!t->spa_set[0])
  645. return -ENOMEM;
  646. for (i = 0; i < t->num_dcr; i++) {
  647. t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
  648. if (!t->label[i])
  649. return -ENOMEM;
  650. sprintf(t->label[i], "label%d", i);
  651. }
  652. t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
  653. if (!t->spa_set[1])
  654. return -ENOMEM;
  655. return ars_state_init(&t->pdev.dev, &t->ars_state);
  656. }
  657. static void dcr_common_init(struct acpi_nfit_control_region *dcr)
  658. {
  659. dcr->vendor_id = 0xabcd;
  660. dcr->device_id = 0;
  661. dcr->revision_id = 1;
  662. dcr->valid_fields = 1;
  663. dcr->manufacturing_location = 0xa;
  664. dcr->manufacturing_date = cpu_to_be16(2016);
  665. }
  666. static void nfit_test0_setup(struct nfit_test *t)
  667. {
  668. const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
  669. + (sizeof(u64) * NUM_HINTS);
  670. struct acpi_nfit_desc *acpi_desc;
  671. struct acpi_nfit_memory_map *memdev;
  672. void *nfit_buf = t->nfit_buf;
  673. struct acpi_nfit_system_address *spa;
  674. struct acpi_nfit_control_region *dcr;
  675. struct acpi_nfit_data_region *bdw;
  676. struct acpi_nfit_flush_address *flush;
  677. unsigned int offset, i;
  678. /*
  679. * spa0 (interleave first half of dimm0 and dimm1, note storage
  680. * does not actually alias the related block-data-window
  681. * regions)
  682. */
  683. spa = nfit_buf;
  684. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  685. spa->header.length = sizeof(*spa);
  686. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  687. spa->range_index = 0+1;
  688. spa->address = t->spa_set_dma[0];
  689. spa->length = SPA0_SIZE;
  690. /*
  691. * spa1 (interleave last half of the 4 DIMMS, note storage
  692. * does not actually alias the related block-data-window
  693. * regions)
  694. */
  695. spa = nfit_buf + sizeof(*spa);
  696. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  697. spa->header.length = sizeof(*spa);
  698. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  699. spa->range_index = 1+1;
  700. spa->address = t->spa_set_dma[1];
  701. spa->length = SPA1_SIZE;
  702. /* spa2 (dcr0) dimm0 */
  703. spa = nfit_buf + sizeof(*spa) * 2;
  704. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  705. spa->header.length = sizeof(*spa);
  706. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  707. spa->range_index = 2+1;
  708. spa->address = t->dcr_dma[0];
  709. spa->length = DCR_SIZE;
  710. /* spa3 (dcr1) dimm1 */
  711. spa = nfit_buf + sizeof(*spa) * 3;
  712. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  713. spa->header.length = sizeof(*spa);
  714. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  715. spa->range_index = 3+1;
  716. spa->address = t->dcr_dma[1];
  717. spa->length = DCR_SIZE;
  718. /* spa4 (dcr2) dimm2 */
  719. spa = nfit_buf + sizeof(*spa) * 4;
  720. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  721. spa->header.length = sizeof(*spa);
  722. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  723. spa->range_index = 4+1;
  724. spa->address = t->dcr_dma[2];
  725. spa->length = DCR_SIZE;
  726. /* spa5 (dcr3) dimm3 */
  727. spa = nfit_buf + sizeof(*spa) * 5;
  728. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  729. spa->header.length = sizeof(*spa);
  730. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  731. spa->range_index = 5+1;
  732. spa->address = t->dcr_dma[3];
  733. spa->length = DCR_SIZE;
  734. /* spa6 (bdw for dcr0) dimm0 */
  735. spa = nfit_buf + sizeof(*spa) * 6;
  736. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  737. spa->header.length = sizeof(*spa);
  738. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  739. spa->range_index = 6+1;
  740. spa->address = t->dimm_dma[0];
  741. spa->length = DIMM_SIZE;
  742. /* spa7 (bdw for dcr1) dimm1 */
  743. spa = nfit_buf + sizeof(*spa) * 7;
  744. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  745. spa->header.length = sizeof(*spa);
  746. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  747. spa->range_index = 7+1;
  748. spa->address = t->dimm_dma[1];
  749. spa->length = DIMM_SIZE;
  750. /* spa8 (bdw for dcr2) dimm2 */
  751. spa = nfit_buf + sizeof(*spa) * 8;
  752. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  753. spa->header.length = sizeof(*spa);
  754. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  755. spa->range_index = 8+1;
  756. spa->address = t->dimm_dma[2];
  757. spa->length = DIMM_SIZE;
  758. /* spa9 (bdw for dcr3) dimm3 */
  759. spa = nfit_buf + sizeof(*spa) * 9;
  760. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  761. spa->header.length = sizeof(*spa);
  762. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  763. spa->range_index = 9+1;
  764. spa->address = t->dimm_dma[3];
  765. spa->length = DIMM_SIZE;
  766. offset = sizeof(*spa) * 10;
  767. /* mem-region0 (spa0, dimm0) */
  768. memdev = nfit_buf + offset;
  769. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  770. memdev->header.length = sizeof(*memdev);
  771. memdev->device_handle = handle[0];
  772. memdev->physical_id = 0;
  773. memdev->region_id = 0;
  774. memdev->range_index = 0+1;
  775. memdev->region_index = 4+1;
  776. memdev->region_size = SPA0_SIZE/2;
  777. memdev->region_offset = 1;
  778. memdev->address = 0;
  779. memdev->interleave_index = 0;
  780. memdev->interleave_ways = 2;
  781. /* mem-region1 (spa0, dimm1) */
  782. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
  783. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  784. memdev->header.length = sizeof(*memdev);
  785. memdev->device_handle = handle[1];
  786. memdev->physical_id = 1;
  787. memdev->region_id = 0;
  788. memdev->range_index = 0+1;
  789. memdev->region_index = 5+1;
  790. memdev->region_size = SPA0_SIZE/2;
  791. memdev->region_offset = (1 << 8);
  792. memdev->address = 0;
  793. memdev->interleave_index = 0;
  794. memdev->interleave_ways = 2;
  795. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  796. /* mem-region2 (spa1, dimm0) */
  797. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
  798. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  799. memdev->header.length = sizeof(*memdev);
  800. memdev->device_handle = handle[0];
  801. memdev->physical_id = 0;
  802. memdev->region_id = 1;
  803. memdev->range_index = 1+1;
  804. memdev->region_index = 4+1;
  805. memdev->region_size = SPA1_SIZE/4;
  806. memdev->region_offset = (1 << 16);
  807. memdev->address = SPA0_SIZE/2;
  808. memdev->interleave_index = 0;
  809. memdev->interleave_ways = 4;
  810. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  811. /* mem-region3 (spa1, dimm1) */
  812. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
  813. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  814. memdev->header.length = sizeof(*memdev);
  815. memdev->device_handle = handle[1];
  816. memdev->physical_id = 1;
  817. memdev->region_id = 1;
  818. memdev->range_index = 1+1;
  819. memdev->region_index = 5+1;
  820. memdev->region_size = SPA1_SIZE/4;
  821. memdev->region_offset = (1 << 24);
  822. memdev->address = SPA0_SIZE/2;
  823. memdev->interleave_index = 0;
  824. memdev->interleave_ways = 4;
  825. /* mem-region4 (spa1, dimm2) */
  826. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
  827. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  828. memdev->header.length = sizeof(*memdev);
  829. memdev->device_handle = handle[2];
  830. memdev->physical_id = 2;
  831. memdev->region_id = 0;
  832. memdev->range_index = 1+1;
  833. memdev->region_index = 6+1;
  834. memdev->region_size = SPA1_SIZE/4;
  835. memdev->region_offset = (1ULL << 32);
  836. memdev->address = SPA0_SIZE/2;
  837. memdev->interleave_index = 0;
  838. memdev->interleave_ways = 4;
  839. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  840. /* mem-region5 (spa1, dimm3) */
  841. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
  842. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  843. memdev->header.length = sizeof(*memdev);
  844. memdev->device_handle = handle[3];
  845. memdev->physical_id = 3;
  846. memdev->region_id = 0;
  847. memdev->range_index = 1+1;
  848. memdev->region_index = 7+1;
  849. memdev->region_size = SPA1_SIZE/4;
  850. memdev->region_offset = (1ULL << 40);
  851. memdev->address = SPA0_SIZE/2;
  852. memdev->interleave_index = 0;
  853. memdev->interleave_ways = 4;
  854. /* mem-region6 (spa/dcr0, dimm0) */
  855. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
  856. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  857. memdev->header.length = sizeof(*memdev);
  858. memdev->device_handle = handle[0];
  859. memdev->physical_id = 0;
  860. memdev->region_id = 0;
  861. memdev->range_index = 2+1;
  862. memdev->region_index = 0+1;
  863. memdev->region_size = 0;
  864. memdev->region_offset = 0;
  865. memdev->address = 0;
  866. memdev->interleave_index = 0;
  867. memdev->interleave_ways = 1;
  868. /* mem-region7 (spa/dcr1, dimm1) */
  869. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
  870. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  871. memdev->header.length = sizeof(*memdev);
  872. memdev->device_handle = handle[1];
  873. memdev->physical_id = 1;
  874. memdev->region_id = 0;
  875. memdev->range_index = 3+1;
  876. memdev->region_index = 1+1;
  877. memdev->region_size = 0;
  878. memdev->region_offset = 0;
  879. memdev->address = 0;
  880. memdev->interleave_index = 0;
  881. memdev->interleave_ways = 1;
  882. /* mem-region8 (spa/dcr2, dimm2) */
  883. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
  884. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  885. memdev->header.length = sizeof(*memdev);
  886. memdev->device_handle = handle[2];
  887. memdev->physical_id = 2;
  888. memdev->region_id = 0;
  889. memdev->range_index = 4+1;
  890. memdev->region_index = 2+1;
  891. memdev->region_size = 0;
  892. memdev->region_offset = 0;
  893. memdev->address = 0;
  894. memdev->interleave_index = 0;
  895. memdev->interleave_ways = 1;
  896. /* mem-region9 (spa/dcr3, dimm3) */
  897. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
  898. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  899. memdev->header.length = sizeof(*memdev);
  900. memdev->device_handle = handle[3];
  901. memdev->physical_id = 3;
  902. memdev->region_id = 0;
  903. memdev->range_index = 5+1;
  904. memdev->region_index = 3+1;
  905. memdev->region_size = 0;
  906. memdev->region_offset = 0;
  907. memdev->address = 0;
  908. memdev->interleave_index = 0;
  909. memdev->interleave_ways = 1;
  910. /* mem-region10 (spa/bdw0, dimm0) */
  911. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
  912. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  913. memdev->header.length = sizeof(*memdev);
  914. memdev->device_handle = handle[0];
  915. memdev->physical_id = 0;
  916. memdev->region_id = 0;
  917. memdev->range_index = 6+1;
  918. memdev->region_index = 0+1;
  919. memdev->region_size = 0;
  920. memdev->region_offset = 0;
  921. memdev->address = 0;
  922. memdev->interleave_index = 0;
  923. memdev->interleave_ways = 1;
  924. /* mem-region11 (spa/bdw1, dimm1) */
  925. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
  926. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  927. memdev->header.length = sizeof(*memdev);
  928. memdev->device_handle = handle[1];
  929. memdev->physical_id = 1;
  930. memdev->region_id = 0;
  931. memdev->range_index = 7+1;
  932. memdev->region_index = 1+1;
  933. memdev->region_size = 0;
  934. memdev->region_offset = 0;
  935. memdev->address = 0;
  936. memdev->interleave_index = 0;
  937. memdev->interleave_ways = 1;
  938. /* mem-region12 (spa/bdw2, dimm2) */
  939. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
  940. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  941. memdev->header.length = sizeof(*memdev);
  942. memdev->device_handle = handle[2];
  943. memdev->physical_id = 2;
  944. memdev->region_id = 0;
  945. memdev->range_index = 8+1;
  946. memdev->region_index = 2+1;
  947. memdev->region_size = 0;
  948. memdev->region_offset = 0;
  949. memdev->address = 0;
  950. memdev->interleave_index = 0;
  951. memdev->interleave_ways = 1;
  952. /* mem-region13 (spa/bdw3, dimm3) */
  953. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
  954. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  955. memdev->header.length = sizeof(*memdev);
  956. memdev->device_handle = handle[3];
  957. memdev->physical_id = 3;
  958. memdev->region_id = 0;
  959. memdev->range_index = 9+1;
  960. memdev->region_index = 3+1;
  961. memdev->region_size = 0;
  962. memdev->region_offset = 0;
  963. memdev->address = 0;
  964. memdev->interleave_index = 0;
  965. memdev->interleave_ways = 1;
  966. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  967. offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
  968. /* dcr-descriptor0: blk */
  969. dcr = nfit_buf + offset;
  970. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  971. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  972. dcr->region_index = 0+1;
  973. dcr_common_init(dcr);
  974. dcr->serial_number = ~handle[0];
  975. dcr->code = NFIT_FIC_BLK;
  976. dcr->windows = 1;
  977. dcr->window_size = DCR_SIZE;
  978. dcr->command_offset = 0;
  979. dcr->command_size = 8;
  980. dcr->status_offset = 8;
  981. dcr->status_size = 4;
  982. /* dcr-descriptor1: blk */
  983. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
  984. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  985. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  986. dcr->region_index = 1+1;
  987. dcr_common_init(dcr);
  988. dcr->serial_number = ~handle[1];
  989. dcr->code = NFIT_FIC_BLK;
  990. dcr->windows = 1;
  991. dcr->window_size = DCR_SIZE;
  992. dcr->command_offset = 0;
  993. dcr->command_size = 8;
  994. dcr->status_offset = 8;
  995. dcr->status_size = 4;
  996. /* dcr-descriptor2: blk */
  997. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
  998. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  999. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  1000. dcr->region_index = 2+1;
  1001. dcr_common_init(dcr);
  1002. dcr->serial_number = ~handle[2];
  1003. dcr->code = NFIT_FIC_BLK;
  1004. dcr->windows = 1;
  1005. dcr->window_size = DCR_SIZE;
  1006. dcr->command_offset = 0;
  1007. dcr->command_size = 8;
  1008. dcr->status_offset = 8;
  1009. dcr->status_size = 4;
  1010. /* dcr-descriptor3: blk */
  1011. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
  1012. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1013. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  1014. dcr->region_index = 3+1;
  1015. dcr_common_init(dcr);
  1016. dcr->serial_number = ~handle[3];
  1017. dcr->code = NFIT_FIC_BLK;
  1018. dcr->windows = 1;
  1019. dcr->window_size = DCR_SIZE;
  1020. dcr->command_offset = 0;
  1021. dcr->command_size = 8;
  1022. dcr->status_offset = 8;
  1023. dcr->status_size = 4;
  1024. offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
  1025. /* dcr-descriptor0: pmem */
  1026. dcr = nfit_buf + offset;
  1027. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1028. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1029. window_size);
  1030. dcr->region_index = 4+1;
  1031. dcr_common_init(dcr);
  1032. dcr->serial_number = ~handle[0];
  1033. dcr->code = NFIT_FIC_BYTEN;
  1034. dcr->windows = 0;
  1035. /* dcr-descriptor1: pmem */
  1036. dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
  1037. window_size);
  1038. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1039. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1040. window_size);
  1041. dcr->region_index = 5+1;
  1042. dcr_common_init(dcr);
  1043. dcr->serial_number = ~handle[1];
  1044. dcr->code = NFIT_FIC_BYTEN;
  1045. dcr->windows = 0;
  1046. /* dcr-descriptor2: pmem */
  1047. dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
  1048. window_size) * 2;
  1049. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1050. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1051. window_size);
  1052. dcr->region_index = 6+1;
  1053. dcr_common_init(dcr);
  1054. dcr->serial_number = ~handle[2];
  1055. dcr->code = NFIT_FIC_BYTEN;
  1056. dcr->windows = 0;
  1057. /* dcr-descriptor3: pmem */
  1058. dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
  1059. window_size) * 3;
  1060. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1061. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1062. window_size);
  1063. dcr->region_index = 7+1;
  1064. dcr_common_init(dcr);
  1065. dcr->serial_number = ~handle[3];
  1066. dcr->code = NFIT_FIC_BYTEN;
  1067. dcr->windows = 0;
  1068. offset = offset + offsetof(struct acpi_nfit_control_region,
  1069. window_size) * 4;
  1070. /* bdw0 (spa/dcr0, dimm0) */
  1071. bdw = nfit_buf + offset;
  1072. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1073. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  1074. bdw->region_index = 0+1;
  1075. bdw->windows = 1;
  1076. bdw->offset = 0;
  1077. bdw->size = BDW_SIZE;
  1078. bdw->capacity = DIMM_SIZE;
  1079. bdw->start_address = 0;
  1080. /* bdw1 (spa/dcr1, dimm1) */
  1081. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
  1082. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1083. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  1084. bdw->region_index = 1+1;
  1085. bdw->windows = 1;
  1086. bdw->offset = 0;
  1087. bdw->size = BDW_SIZE;
  1088. bdw->capacity = DIMM_SIZE;
  1089. bdw->start_address = 0;
  1090. /* bdw2 (spa/dcr2, dimm2) */
  1091. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
  1092. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1093. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  1094. bdw->region_index = 2+1;
  1095. bdw->windows = 1;
  1096. bdw->offset = 0;
  1097. bdw->size = BDW_SIZE;
  1098. bdw->capacity = DIMM_SIZE;
  1099. bdw->start_address = 0;
  1100. /* bdw3 (spa/dcr3, dimm3) */
  1101. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
  1102. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1103. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  1104. bdw->region_index = 3+1;
  1105. bdw->windows = 1;
  1106. bdw->offset = 0;
  1107. bdw->size = BDW_SIZE;
  1108. bdw->capacity = DIMM_SIZE;
  1109. bdw->start_address = 0;
  1110. offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
  1111. /* flush0 (dimm0) */
  1112. flush = nfit_buf + offset;
  1113. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1114. flush->header.length = flush_hint_size;
  1115. flush->device_handle = handle[0];
  1116. flush->hint_count = NUM_HINTS;
  1117. for (i = 0; i < NUM_HINTS; i++)
  1118. flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
  1119. /* flush1 (dimm1) */
  1120. flush = nfit_buf + offset + flush_hint_size * 1;
  1121. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1122. flush->header.length = flush_hint_size;
  1123. flush->device_handle = handle[1];
  1124. flush->hint_count = NUM_HINTS;
  1125. for (i = 0; i < NUM_HINTS; i++)
  1126. flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
  1127. /* flush2 (dimm2) */
  1128. flush = nfit_buf + offset + flush_hint_size * 2;
  1129. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1130. flush->header.length = flush_hint_size;
  1131. flush->device_handle = handle[2];
  1132. flush->hint_count = NUM_HINTS;
  1133. for (i = 0; i < NUM_HINTS; i++)
  1134. flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
  1135. /* flush3 (dimm3) */
  1136. flush = nfit_buf + offset + flush_hint_size * 3;
  1137. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1138. flush->header.length = flush_hint_size;
  1139. flush->device_handle = handle[3];
  1140. flush->hint_count = NUM_HINTS;
  1141. for (i = 0; i < NUM_HINTS; i++)
  1142. flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
  1143. if (t->setup_hotplug) {
  1144. offset = offset + flush_hint_size * 4;
  1145. /* dcr-descriptor4: blk */
  1146. dcr = nfit_buf + offset;
  1147. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1148. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  1149. dcr->region_index = 8+1;
  1150. dcr_common_init(dcr);
  1151. dcr->serial_number = ~handle[4];
  1152. dcr->code = NFIT_FIC_BLK;
  1153. dcr->windows = 1;
  1154. dcr->window_size = DCR_SIZE;
  1155. dcr->command_offset = 0;
  1156. dcr->command_size = 8;
  1157. dcr->status_offset = 8;
  1158. dcr->status_size = 4;
  1159. offset = offset + sizeof(struct acpi_nfit_control_region);
  1160. /* dcr-descriptor4: pmem */
  1161. dcr = nfit_buf + offset;
  1162. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1163. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1164. window_size);
  1165. dcr->region_index = 9+1;
  1166. dcr_common_init(dcr);
  1167. dcr->serial_number = ~handle[4];
  1168. dcr->code = NFIT_FIC_BYTEN;
  1169. dcr->windows = 0;
  1170. offset = offset + offsetof(struct acpi_nfit_control_region,
  1171. window_size);
  1172. /* bdw4 (spa/dcr4, dimm4) */
  1173. bdw = nfit_buf + offset;
  1174. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1175. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  1176. bdw->region_index = 8+1;
  1177. bdw->windows = 1;
  1178. bdw->offset = 0;
  1179. bdw->size = BDW_SIZE;
  1180. bdw->capacity = DIMM_SIZE;
  1181. bdw->start_address = 0;
  1182. offset = offset + sizeof(struct acpi_nfit_data_region);
  1183. /* spa10 (dcr4) dimm4 */
  1184. spa = nfit_buf + offset;
  1185. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1186. spa->header.length = sizeof(*spa);
  1187. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1188. spa->range_index = 10+1;
  1189. spa->address = t->dcr_dma[4];
  1190. spa->length = DCR_SIZE;
  1191. /*
  1192. * spa11 (single-dimm interleave for hotplug, note storage
  1193. * does not actually alias the related block-data-window
  1194. * regions)
  1195. */
  1196. spa = nfit_buf + offset + sizeof(*spa);
  1197. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1198. spa->header.length = sizeof(*spa);
  1199. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  1200. spa->range_index = 11+1;
  1201. spa->address = t->spa_set_dma[2];
  1202. spa->length = SPA0_SIZE;
  1203. /* spa12 (bdw for dcr4) dimm4 */
  1204. spa = nfit_buf + offset + sizeof(*spa) * 2;
  1205. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1206. spa->header.length = sizeof(*spa);
  1207. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1208. spa->range_index = 12+1;
  1209. spa->address = t->dimm_dma[4];
  1210. spa->length = DIMM_SIZE;
  1211. offset = offset + sizeof(*spa) * 3;
  1212. /* mem-region14 (spa/dcr4, dimm4) */
  1213. memdev = nfit_buf + offset;
  1214. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1215. memdev->header.length = sizeof(*memdev);
  1216. memdev->device_handle = handle[4];
  1217. memdev->physical_id = 4;
  1218. memdev->region_id = 0;
  1219. memdev->range_index = 10+1;
  1220. memdev->region_index = 8+1;
  1221. memdev->region_size = 0;
  1222. memdev->region_offset = 0;
  1223. memdev->address = 0;
  1224. memdev->interleave_index = 0;
  1225. memdev->interleave_ways = 1;
  1226. /* mem-region15 (spa0, dimm4) */
  1227. memdev = nfit_buf + offset +
  1228. sizeof(struct acpi_nfit_memory_map);
  1229. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1230. memdev->header.length = sizeof(*memdev);
  1231. memdev->device_handle = handle[4];
  1232. memdev->physical_id = 4;
  1233. memdev->region_id = 0;
  1234. memdev->range_index = 11+1;
  1235. memdev->region_index = 9+1;
  1236. memdev->region_size = SPA0_SIZE;
  1237. memdev->region_offset = (1ULL << 48);
  1238. memdev->address = 0;
  1239. memdev->interleave_index = 0;
  1240. memdev->interleave_ways = 1;
  1241. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1242. /* mem-region16 (spa/bdw4, dimm4) */
  1243. memdev = nfit_buf + offset +
  1244. sizeof(struct acpi_nfit_memory_map) * 2;
  1245. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1246. memdev->header.length = sizeof(*memdev);
  1247. memdev->device_handle = handle[4];
  1248. memdev->physical_id = 4;
  1249. memdev->region_id = 0;
  1250. memdev->range_index = 12+1;
  1251. memdev->region_index = 8+1;
  1252. memdev->region_size = 0;
  1253. memdev->region_offset = 0;
  1254. memdev->address = 0;
  1255. memdev->interleave_index = 0;
  1256. memdev->interleave_ways = 1;
  1257. offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
  1258. /* flush4 (dimm4) */
  1259. flush = nfit_buf + offset;
  1260. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1261. flush->header.length = flush_hint_size;
  1262. flush->device_handle = handle[4];
  1263. flush->hint_count = NUM_HINTS;
  1264. for (i = 0; i < NUM_HINTS; i++)
  1265. flush->hint_address[i] = t->flush_dma[4]
  1266. + i * sizeof(u64);
  1267. }
  1268. post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
  1269. acpi_desc = &t->acpi_desc;
  1270. set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
  1271. set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
  1272. set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
  1273. set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
  1274. set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
  1275. set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
  1276. set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
  1277. set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
  1278. set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
  1279. }
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;

	/* virtual cd region */
	spa = nfit_buf + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;

	offset += sizeof(*spa) * 2;
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;

	offset += sizeof(*memdev);
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[5];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;

	offset += dcr->header.length;
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[6];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0;
	memdev->region_index = 0+2;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;

	/* dcr-descriptor1 */
	offset += sizeof(*memdev);
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+2;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[6];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;

	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
}
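
/*
 * Backend for the emulated BLK-aperture I/O path: the "block data window"
 * here is plain host memory, so a transfer reduces to a memcpy() performed
 * while holding a region lane, plus a read-side mmio_flush_range() call
 * purely for API coverage.
 */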
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);

		/* give us some coverage of the mmio_flush_range() API */
		mmio_flush_range(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}
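
/*
 * Minimal _DSM evaluation stub for nfit_ctl_test(): setup_result() stages
 * the next reply as an ACPI buffer object, and nfit_test_evaluate_dsm()
 * hands that buffer back for the expected handle regardless of uuid, rev,
 * or function.  Rough usage sketch (mirroring the test below):
 *
 *	setup_result(cmds.buf, cmd_size);
 *	acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
 *			cmds.buf, cmd_size, &cmd_rc);
 */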
static unsigned long nfit_ctl_handle;

static union acpi_object *result;

static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
		const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4)
{
	if (handle != &nfit_ctl_handle)
		return ERR_PTR(-ENXIO);

	return result;
}

static int setup_result(void *buf, size_t size)
{
	result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
	if (!result)
		return -ENOMEM;
	result->package.type = ACPI_TYPE_BUFFER;
	result->buffer.pointer = (void *) (result + 1);
	result->buffer.length = size;
	memcpy(result->buffer.pointer, buf, size);
	memset(buf, 0, size);
	return 0;
}
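
/*
 * Unit test for acpi_nfit_ctl(): feeds canned payloads through the stub
 * above and checks command-status translation, ars_status output sizing,
 * and that a non-zero extended status on get_config_size is reported as
 * a failure.
 */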
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	union {
		struct nd_cmd_get_config_size cfg_size;
		struct nd_cmd_ars_status ars_stat;
		struct nd_cmd_ars_cap ars_cap;
		char buf[sizeof(struct nd_cmd_ars_status)
			+ sizeof(struct nd_ars_record)];
	} cmds;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
		},
		.dev = &adev->dev,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};

	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
			|| cmds.cfg_size.config_size != SZ_128K
			|| cmds.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmds.ars_cap);
	cmds.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmds.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size - 4,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);

	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}
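
/*
 * Probe path, roughly: run the ctl unit test on instance 0, allocate the
 * per-instance backing store, build the NFIT via ->setup(), register the
 * bus with acpi_nfit_init(), then (instance 0 only) rebuild the table
 * with hotplug entries and issue a 0x80 notification to cover the _FIT
 * update path.
 */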
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
		rc = nfit_ctl_test(&pdev->dev);
		if (rc)
			return rc;
	}

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = NULL;
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = nfit_test_ctl;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
			nfit_test->nfit_size);
	if (rc)
		return rc;
	rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
	if (rc)
		return rc;

	if (nfit_test->setup != nfit_test0_setup)
		return 0;

	nfit_test->setup_hotplug = 1;
	nfit_test->setup(nfit_test);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = nfit_test->nfit_size;
	obj->buffer.pointer = nfit_test->nfit_buf;
	*(nfit_test->_fit) = obj;
	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);

	/* associate dimm devices with nfit_mem data for notification testing */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		int i;

		for (i = 0; i < NUM_DCR; i++)
			if (nfit_handle == handle[i])
				dev_set_drvdata(nfit_test->dimm_dev[i],
						nfit_mem);
	}
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
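
/*
 * Probe-time resources are devm-managed, so remove() has nothing to undo;
 * release() frees the nfit_test instance allocated at module init.
 */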
static int nfit_test_remove(struct platform_device *pdev)
{
	return 0;
}

static void nfit_test_release(struct device *dev)
{
	struct nfit_test *nfit_test = to_nfit_test(dev);

	kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};
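
/*
 * Module init: install the nfit_test resource-lookup and _DSM overrides,
 * create the "nfit_test_dimm" class, then register NUM_NFITS platform
 * devices -- instance 0 with the full multi-dimm topology and instance 1
 * with the minimal flat-range topology -- before registering the driver.
 */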
static __init int nfit_test_init(void)
{
	int rc, i;

	nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);

	nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
	if (IS_ERR(nfit_test_dimm)) {
		rc = PTR_ERR(nfit_test_dimm);
		goto err_register;
	}

	for (i = 0; i < NUM_NFITS; i++) {
		struct nfit_test *nfit_test;
		struct platform_device *pdev;

		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
		if (!nfit_test) {
			rc = -ENOMEM;
			goto err_register;
		}
		INIT_LIST_HEAD(&nfit_test->resources);
		switch (i) {
		case 0:
			nfit_test->num_pm = NUM_PM;
			nfit_test->dcr_idx = 0;
			nfit_test->num_dcr = NUM_DCR;
			nfit_test->alloc = nfit_test0_alloc;
			nfit_test->setup = nfit_test0_setup;
			break;
		case 1:
			nfit_test->num_pm = 1;
			nfit_test->dcr_idx = NUM_DCR;
			nfit_test->num_dcr = 2;
			nfit_test->alloc = nfit_test1_alloc;
			nfit_test->setup = nfit_test1_setup;
			break;
		default:
			rc = -EINVAL;
			goto err_register;
		}
		pdev = &nfit_test->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = nfit_test_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}
		get_device(&pdev->dev);

		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err_register;

		instances[i] = nfit_test;
	}

	rc = platform_driver_register(&nfit_test_driver);
	if (rc)
		goto err_register;
	return 0;

 err_register:
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			put_device(&instances[i]->pdev.dev);

	return rc;
}
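
/*
 * Module exit: unregister the test devices and driver, drop the overrides
 * installed at init, release the device references taken there, and
 * destroy the dimm class.
 */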
static __exit void nfit_test_exit(void)
{
	int i;

	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	platform_driver_unregister(&nfit_test_driver);
	nfit_test_teardown();

	for (i = 0; i < NUM_NFITS; i++)
		put_device(&instances[i]->pdev.dev);
	class_destroy(nfit_test_dimm);
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");