nfit.c

/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"

/*
 * Generate an NFIT table to describe the following topology:
 *
 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
 *
 *                     (a)                       (b)           DIMM   BLK-REGION
 *           +----------+--------------+----------+---------+
 * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
 * | imc0 +--+- - - - - region0 - - - -+----------+         +
 * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
 *    |      +----------+--------------v----------v         v
 * +--+---+                            |                    |
 * | cpu0 |                                    region1
 * +--+---+                            |                    |
 *    |      +-------------------------^----------^         ^
 * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
 * | imc1 +--+-------------------------+----------+         +
 * +------+  |                 blk5.0             |  pm1.0  |    3      region5
 *           +-------------------------+----------+-+-------+
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket. Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portions of dimm0 and dimm1 are interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespaces
 *    allocate from the bottom of a region. The unallocated
 *    portion of REGION0 aliases with REGION2 and REGION3. That
 *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
 *    "blk3.0") starting at the base of each DIMM to offset (a) in those
 *    DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
 *    names that can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3. Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0", and the rest is reclaimed in four BLK namespaces (one for
 *    each dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portions of dimm2 and dimm3 that do not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
 *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note that BLK namespaces need not be contiguous in DPA-space and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) An NFIT table may describe a simple system-physical-address range
 *    with no BLK aliasing. This type of region may optionally
 *    reference an NVDIMM.
 */
enum {
	NUM_PM = 2,
	NUM_DCR = 4,
	NUM_BDW = NUM_DCR,
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};
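
/*
 * Emulated "register file" for a DIMM control region: an 8-byte
 * command register (the block-window target address), a 4-byte status
 * register, and the data aperture itself. The command/status offsets
 * advertised in the dcr-descriptors below follow this layout.
 */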
struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];
};

#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
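
/*
 * Device handles for the four emulated DIMMs: dimm0/dimm1 behind imc0
 * and dimm2/dimm3 behind imc1, all on socket0 of node0.
 */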
static u32 handle[NUM_DCR] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
};

struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;
	void *nfit_buf;
	dma_addr_t nfit_dma;
	size_t nfit_size;
	int num_dcr;
	int num_pm;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
};

static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}
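
/*
 * Emulated DSM backends: the label-space commands below are serviced
 * per-DIMM out of t->label[], while the bus-level ARS commands return
 * canned results.
 */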
static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	nd_cmd->status = 0;
	nd_cmd->config_size = LABEL_SIZE;
	nd_cmd->max_xfer = SZ_4K;
	return 0;
}

static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;
	nd_cmd->status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	rc = buf_len - sizeof(*nd_cmd) - len;
	return rc;
}

static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;
	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);
	return rc;
}

static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	nd_cmd->max_ars_out = 256;
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
	return 0;
}

static int nfit_test_cmd_ars_start(struct nd_cmd_ars_start *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	nd_cmd->status = 0;
	return 0;
}

static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	nd_cmd->out_length = 256;
	nd_cmd->num_records = 0;
	nd_cmd->status = 0;
	return 0;
}
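
/*
 * Top-level command entry point: DIMM-scoped commands are routed by
 * NFIT device handle to the matching label area, while bus-scoped ARS
 * commands are gated by the bus DSM mask.
 */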
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	int i, rc = 0;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

		if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
			return -ENOTTY;

		/* lookup label space for the given dimm */
		for (i = 0; i < ARRAY_SIZE(handle); i++)
			if (__to_nfit_memdev(nfit_mem)->device_handle ==
					handle[i])
				break;
		if (i >= ARRAY_SIZE(handle))
			return -ENXIO;

		switch (cmd) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
					t->label[i]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
					t->label[i]);
			break;
		default:
			return -ENOTTY;
		}
	} else {
		if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask))
			return -ENOTTY;

		switch (cmd) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(buf, buf_len);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(buf, buf_len);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}
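
/*
 * nfit_test_lock protects the per-instance resource lists; instances[]
 * lets nfit_test_lookup() translate an emulated system-physical-address
 * back to its backing test buffer.
 */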
static DEFINE_SPINLOCK(nfit_test_lock);
static struct nfit_test *instances[NUM_NFITS];

static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;
	struct resource *res = nfit_res->res;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	if (is_vmalloc_addr(nfit_res->buf))
		vfree(nfit_res->buf);
	else
		dma_free_coherent(nfit_res->dev, resource_size(res),
				nfit_res->buf, res->start);
	kfree(res);
	kfree(nfit_res);
}

static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
		void *buf)
{
	struct device *dev = &t->pdev.dev;
	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
			GFP_KERNEL);
	int rc;

	if (!res || !buf || !nfit_res)
		goto err;
	rc = devm_add_action(dev, release_nfit_res, nfit_res);
	if (rc)
		goto err;
	INIT_LIST_HEAD(&nfit_res->list);
	memset(buf, 0, size);
	nfit_res->dev = dev;
	nfit_res->buf = buf;
	nfit_res->res = res;
	res->start = *dma;
	res->end = *dma + size - 1;
	res->name = "NFIT";
	spin_lock(&nfit_test_lock);
	list_add(&nfit_res->list, &t->resources);
	spin_unlock(&nfit_test_lock);

	return nfit_res->buf;
 err:
	if (buf && !is_vmalloc_addr(buf))
		dma_free_coherent(dev, size, buf, *dma);
	else if (buf)
		vfree(buf);
	kfree(res);
	kfree(nfit_res);
	return NULL;
}
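
/*
 * test_alloc() backs a range with vmalloc() and reports the kernel
 * virtual address as its "physical" address; test_alloc_coherent() is
 * used for the ranges (e.g. the PMEM SPAs) that are handed out via
 * dma_alloc_coherent() and so carry a real bus address.
 */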
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	void *buf = vmalloc(size);

	*dma = (unsigned long) buf;
	return __test_alloc(t, size, dma, buf);
}

static void *test_alloc_coherent(struct nfit_test *t, size_t size,
		dma_addr_t *dma)
{
	struct device *dev = &t->pdev.dev;
	void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return __test_alloc(t, size, dma, buf);
}

static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(instances); i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct nfit_test *t = instances[i];

		if (!t)
			continue;
		spin_lock(&nfit_test_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res->start && (addr < n->res->start
						+ resource_size(n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
					&& (addr < (unsigned long) n->buf
						+ resource_size(n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&nfit_test_lock);
		if (nfit_res)
			return nfit_res;
	}

	return NULL;
}

static int nfit_test0_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_table_nfit)
			+ sizeof(struct acpi_nfit_system_address) * NUM_SPA
			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
			+ sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	for (i = 0; i < NUM_DCR; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;

		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);

		t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}

	for (i = 0; i < NUM_DCR; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}

	return 0;
}

static int nfit_test1_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_table_nfit)
			+ sizeof(struct acpi_nfit_system_address)
			+ sizeof(struct acpi_nfit_memory_map)
			+ sizeof(struct acpi_nfit_control_region);

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	return 0;
}

static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size)
{
	memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4);
	nfit->header.length = size;
	nfit->header.revision = 1;
	memcpy(nfit->header.oem_id, "LIBND", 6);
	memcpy(nfit->header.oem_table_id, "TEST", 5);
	nfit->header.oem_revision = 1;
	memcpy(nfit->header.asl_compiler_id, "TST", 4);
	nfit->header.asl_compiler_revision = 1;
}
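
/*
 * Build the NFIT for BUS0: ten SPA ranges (2 PMEM, 4 control-region,
 * 4 block-data-window), fourteen memdev mappings, four control
 * regions, four block data windows and four flush hint tables, packed
 * back-to-back in nfit_buf.
 */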
static void nfit_test0_setup(struct nfit_test *t)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct acpi_nfit_memory_map *memdev;
	void *nfit_buf = t->nfit_buf;
	size_t size = t->nfit_size;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_data_region *bdw;
	struct acpi_nfit_flush_address *flush;
	unsigned int offset;

	nfit_test_init_header(nfit_buf, size);

	/*
	 * spa0 (interleave first half of dimm0 and dimm1, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA0_SIZE;

	/*
	 * spa1 (interleave last half of the 4 DIMMS, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 1+1;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA1_SIZE;

	/* spa2 (dcr0) dimm0 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 2+1;
	spa->address = t->dcr_dma[0];
	spa->length = DCR_SIZE;

	/* spa3 (dcr1) dimm1 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 3+1;
	spa->address = t->dcr_dma[1];
	spa->length = DCR_SIZE;

	/* spa4 (dcr2) dimm2 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 4+1;
	spa->address = t->dcr_dma[2];
	spa->length = DCR_SIZE;

	/* spa5 (dcr3) dimm3 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 5+1;
	spa->address = t->dcr_dma[3];
	spa->length = DCR_SIZE;

	/* spa6 (bdw for dcr0) dimm0 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 6+1;
	spa->address = t->dimm_dma[0];
	spa->length = DIMM_SIZE;

	/* spa7 (bdw for dcr1) dimm1 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 7+1;
	spa->address = t->dimm_dma[1];
	spa->length = DIMM_SIZE;

	/* spa8 (bdw for dcr2) dimm2 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 8+1;
	spa->address = t->dimm_dma[2];
	spa->length = DIMM_SIZE;

	/* spa9 (bdw for dcr3) dimm3 */
	spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 9+1;
	spa->address = t->dimm_dma[3];
	spa->length = DIMM_SIZE;

	offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10;
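
	/*
	 * The memdev entries below map each DIMM's DPA into the SPA
	 * ranges above. Note that range_index and region_index are
	 * 1-based (hence the "0+1" style), matching spa->range_index
	 * and dcr->region_index.
	 */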
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0];
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region1 (spa0, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 1+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region2 (spa1, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1];
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region3 (spa1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 1+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region4 (spa1, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 2+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region5 (spa1, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 3+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region6 (spa/dcr0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 2+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region7 (spa/dcr1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 3+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region8 (spa/dcr2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 4+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region9 (spa/dcr3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 5+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region10 (spa/bdw0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 6+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region11 (spa/bdw1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 7+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region12 (spa/bdw2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 8+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region13 (spa/bdw3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 9+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
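
	/*
	 * The control-region descriptors advertise the command/status
	 * geometry emulated by struct nfit_test_dcr: an 8-byte command
	 * at offset 0 and a 4-byte status at offset 8, with one window
	 * of DCR_SIZE per DIMM.
	 */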
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 0+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[0];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor1 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 1+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[1];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor2 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 2+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[2];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor3 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 3+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[3];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
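
	/*
	 * Block-data-window geometry: one BDW_SIZE aperture per DIMM,
	 * addressing DIMM_SIZE of capacity starting at DPA 0.
	 */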
	/* bdw0 (spa/dcr0, dimm0) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 0+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw1 (spa/dcr1, dimm1) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 1+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw2 (spa/dcr2, dimm2) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 2+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw3 (spa/dcr3, dimm3) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 3+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
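
	/*
	 * One flush-hint table per DIMM, each pointing at the 8-byte
	 * buffer allocated for it in nfit_test0_alloc().
	 */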
	/* flush0 (dimm0) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[0];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[0];

	/* flush1 (dimm1) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[1];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[1];

	/* flush2 (dimm2) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[2];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[2];

	/* flush3 (dimm3) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[3];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[3];
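
	/* force-enable the label-space and ARS commands that nfit_test_ctl() implements */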
	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->ndctl = nfit_test_ctl;
}

static void nfit_test1_setup(struct nfit_test *t)
{
	size_t size = t->nfit_size, offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;

	nfit_test_init_header(nfit_buf, size);

	offset = sizeof(struct acpi_table_nfit);
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;

	offset += sizeof(*spa);
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = 0;
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_ARMED;

	offset += sizeof(*memdev);
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 0+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~0;
	dcr->code = 0x201;
	dcr->windows = 0;
	dcr->window_size = 0;
	dcr->command_offset = 0;
	dcr->command_size = 0;
	dcr->status_offset = 0;
	dcr->status_size = 0;
}
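
/*
 * Stand-in for the block-window I/O path: rather than programming the
 * emulated command/status registers, data is copied directly between
 * iobuf and the BDW mapping at the requested DPA.
 */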
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);
		/* give us some coverage of the mmio_flush_range() API */
		mmio_flush_range(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}

static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	int rc;

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	nfit_test->setup(nfit_test);

	acpi_desc = &nfit_test->acpi_desc;
	acpi_desc->dev = &pdev->dev;
	acpi_desc->nfit = nfit_test->nfit_buf;
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;
	acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENXIO;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}

	return 0;
}

static int nfit_test_remove(struct platform_device *pdev)
{
	struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
	struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);

	return 0;
}

static void nfit_test_release(struct device *dev)
{
	struct nfit_test *nfit_test = to_nfit_test(dev);

	kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
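
/*
 * Module init: register one platform device per emulated NFIT. On the
 * first pass a 128M coherent allocation is attempted (and immediately
 * freed) as a sanity check that enough CMA is reserved for the
 * coherent test allocations.
 */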
static __init int nfit_test_init(void)
{
	int rc, i;

	nfit_test_setup(nfit_test_lookup);

	for (i = 0; i < NUM_NFITS; i++) {
		struct nfit_test *nfit_test;
		struct platform_device *pdev;
		static int once;

		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
		if (!nfit_test) {
			rc = -ENOMEM;
			goto err_register;
		}
		INIT_LIST_HEAD(&nfit_test->resources);
		switch (i) {
		case 0:
			nfit_test->num_pm = NUM_PM;
			nfit_test->num_dcr = NUM_DCR;
			nfit_test->alloc = nfit_test0_alloc;
			nfit_test->setup = nfit_test0_setup;
			break;
		case 1:
			nfit_test->num_pm = 1;
			nfit_test->alloc = nfit_test1_alloc;
			nfit_test->setup = nfit_test1_setup;
			break;
		default:
			rc = -EINVAL;
			goto err_register;
		}
		pdev = &nfit_test->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = nfit_test_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}

		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err_register;

		instances[i] = nfit_test;

		if (!once++) {
			dma_addr_t dma;
			void *buf;

			buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
					GFP_KERNEL);
			if (!buf) {
				rc = -ENOMEM;
				dev_warn(&pdev->dev, "need 128M of free cma\n");
				goto err_register;
			}
			dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
		}
	}

	rc = platform_driver_register(&nfit_test_driver);
	if (rc)
		goto err_register;
	return 0;

 err_register:
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	return rc;
}

static __exit void nfit_test_exit(void)
{
	int i;

	platform_driver_unregister(&nfit_test_driver);
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");