nfit.c

/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/libnvdimm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sizes.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"

/*
 * Generate an NFIT table to describe the following topology:
 *
 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
 *
 *                      (a)                       (b)           DIMM   BLK-REGION
 *            +----------+--------------+----------+---------+
 * +------+   |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |   0      region2
 * | imc0 +--+- - - - -  region0  - - - -+----------+         +
 * +--+---+   |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |   1      region3
 *    |       +----------+--------------v----------v         v
 * +--+---+                             |                    |
 * | cpu0 |                                     region1
 * +--+---+                             |                    |
 *    |       +-------------------------^----------^         ^
 * +--+---+   |                blk4.0             |  pm1.0  |   2      region4
 * | imc1 +--+-------------------------+----------+         +
 * +------+   |                blk5.0             |  pm1.0  |   3      region5
 *            +-------------------------+----------+-+-------+
 *
 * +--+---+
 * | cpu1 |
 * +--+---+                  (Hotplug DIMM)
 *    |       +----------------------------------------------+
 * +--+---+   |                blk6.0/pm7.0                   |   4      region6/7
 * | imc0 +--+----------------------------------------------+
 * +------+
 *
 *
 * *) In this layout we have four dimms and two memory controllers in one
 *    socket.  Each unique interface (BLK or PMEM) to DPA space
 *    is identified by a region device with a dynamically assigned id.
 *
 * *) The first portions of dimm0 and dimm1 are interleaved as REGION0.
 *    A single PMEM namespace "pm0.0" is created using half of the
 *    REGION0 SPA-range.  REGION0 spans dimm0 and dimm1.  PMEM namespaces
 *    allocate from the bottom of a region.  The unallocated
 *    portion of REGION0 aliases with REGION2 and REGION3.  That
 *    unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
 *    "blk3.0") starting at the base of each DIMM to offset (a) in those
 *    DIMMs.  "pm0.0", "blk2.0" and "blk3.0" are free-form readable
 *    names that can be assigned to a namespace.
 *
 * *) In the last portion of dimm0 and dimm1 we have an interleaved
 *    SPA range, REGION1, that spans those two dimms as well as dimm2
 *    and dimm3.  Some of REGION1 is allocated to a PMEM namespace named
 *    "pm1.0"; the rest is reclaimed in 4 BLK namespaces (one for each
 *    dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
 *    "blk5.0".
 *
 * *) The portions of dimm2 and dimm3 that do not participate in the
 *    REGION1 interleaved SPA range (i.e. the DPA addresses below offset
 *    (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
 *    Note that BLK namespaces need not be contiguous in DPA-space, and
 *    can consume aliased capacity from multiple interleave sets.
 *
 * BUS1: Legacy NVDIMM (single contiguous range)
 *
 *  region2
 * +---------------------+
 * |---------------------|
 * ||       pm2.0       ||
 * |---------------------|
 * +---------------------+
 *
 * *) An NFIT table may describe a simple system-physical-address range
 *    with no BLK aliasing.  This type of region may optionally
 *    reference an NVDIMM.
 */
enum {
	NUM_PM = 3,
	NUM_DCR = 5,
	NUM_BDW = NUM_DCR,
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};
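
/*
 * Layout of the emulated DIMM control region: an 8-byte command register
 * (bdw_addr) and a 4-byte status register followed by the block-data-window
 * aperture, matching the command/status offsets advertised in the
 * control-region descriptors below.
 */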
struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];
};
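
/*
 * ACPI NFIT device handles (node/socket/imc/channel/dimm bit-fields) for
 * the five emulated DIMMs: four behind the two memory controllers of
 * socket0, plus the hotplug DIMM on socket1.
 */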
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	 | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

static u32 handle[NUM_DCR] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
};

struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;
	void *nfit_buf;
	dma_addr_t nfit_dma;
	size_t nfit_size;
	int num_dcr;
	int num_pm;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);
	void (*setup)(struct nfit_test *t);
	int setup_hotplug;
};

static struct nfit_test *to_nfit_test(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return container_of(pdev, struct nfit_test, pdev);
}
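
/*
 * DSM command emulations: each handler validates the buffer envelope and
 * then either operates on the in-memory label area or returns canned
 * address-range-scrub (ARS) results.
 */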
static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;
	nd_cmd->config_size = LABEL_SIZE;
	nd_cmd->max_xfer = SZ_4K;

	return 0;
}

static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;

	nd_cmd->status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	rc = buf_len - sizeof(*nd_cmd) - len;

	return rc;
}

static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;

	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);

	return rc;
}

static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->max_ars_out = 256;
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;

	return 0;
}

static int nfit_test_cmd_ars_start(struct nd_cmd_ars_start *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->status = 0;

	return 0;
}

static int nfit_test_cmd_ars_status(struct nd_cmd_ars_status *nd_cmd,
		unsigned int buf_len)
{
	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	nd_cmd->out_length = 256;
	nd_cmd->num_records = 0;
	nd_cmd->address = 0;
	nd_cmd->length = -1ULL;
	nd_cmd->status = 0;

	return 0;
}
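
/*
 * Bus 'ndctl' entry point: route label-space commands to the per-DIMM
 * label buffer selected by device handle, and service the bus-level ARS
 * commands.
 */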
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	int i, rc = 0;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

		if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
			return -ENOTTY;

		/* lookup label space for the given dimm */
		for (i = 0; i < ARRAY_SIZE(handle); i++)
			if (__to_nfit_memdev(nfit_mem)->device_handle ==
					handle[i])
				break;
		if (i >= ARRAY_SIZE(handle))
			return -ENXIO;

		switch (cmd) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
					t->label[i]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
					t->label[i]);
			break;
		default:
			return -ENOTTY;
		}
	} else {
		if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask))
			return -ENOTTY;

		switch (cmd) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(buf, buf_len);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(buf, buf_len);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}

static DEFINE_SPINLOCK(nfit_test_lock);
static struct nfit_test *instances[NUM_NFITS];

static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;
	struct resource *res = nfit_res->res;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	if (is_vmalloc_addr(nfit_res->buf))
		vfree(nfit_res->buf);
	else
		dma_free_coherent(nfit_res->dev, resource_size(res),
				nfit_res->buf, res->start);
	kfree(res);
	kfree(nfit_res);
}
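
/*
 * Record every allocation as an nfit_test_resource so that
 * nfit_test_lookup() can translate the fake system-physical-addresses
 * handed to the libnvdimm core back to their backing buffers.  Cleanup
 * runs from the devm action registered below.
 */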
static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
		void *buf)
{
	struct device *dev = &t->pdev.dev;
	struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
	struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
			GFP_KERNEL);
	int rc;

	if (!res || !buf || !nfit_res)
		goto err;
	rc = devm_add_action(dev, release_nfit_res, nfit_res);
	if (rc)
		goto err;
	INIT_LIST_HEAD(&nfit_res->list);
	memset(buf, 0, size);
	nfit_res->dev = dev;
	nfit_res->buf = buf;
	nfit_res->res = res;
	res->start = *dma;
	res->end = *dma + size - 1;
	res->name = "NFIT";
	spin_lock(&nfit_test_lock);
	list_add(&nfit_res->list, &t->resources);
	spin_unlock(&nfit_test_lock);

	return nfit_res->buf;
 err:
	if (buf && !is_vmalloc_addr(buf))
		dma_free_coherent(dev, size, buf, *dma);
	else if (buf)
		vfree(buf);
	kfree(res);
	kfree(nfit_res);
	return NULL;
}

static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	void *buf = vmalloc(size);

	*dma = (unsigned long) buf;
	return __test_alloc(t, size, dma, buf);
}

static void *test_alloc_coherent(struct nfit_test *t, size_t size,
		dma_addr_t *dma)
{
	struct device *dev = &t->pdev.dev;
	void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return __test_alloc(t, size, dma, buf);
}
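
/* resolve a fake address (dma handle or buffer pointer) to its resource */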
static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(instances); i++) {
		struct nfit_test_resource *n, *nfit_res = NULL;
		struct nfit_test *t = instances[i];

		if (!t)
			continue;
		spin_lock(&nfit_test_lock);
		list_for_each_entry(n, &t->resources, list) {
			if (addr >= n->res->start && (addr < n->res->start
						+ resource_size(n->res))) {
				nfit_res = n;
				break;
			} else if (addr >= (unsigned long) n->buf
					&& (addr < (unsigned long) n->buf
						+ resource_size(n->res))) {
				nfit_res = n;
				break;
			}
		}
		spin_unlock(&nfit_test_lock);
		if (nfit_res)
			return nfit_res;
	}

	return NULL;
}
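
/*
 * Allocate the NFIT buffer for bus 0 plus backing storage for its
 * interleave sets, per-DIMM memory, label areas, flush hint pages and
 * control regions.
 */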
static int nfit_test0_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
			+ sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	t->spa_set[2] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[2]);
	if (!t->spa_set[2])
		return -ENOMEM;

	for (i = 0; i < NUM_DCR; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;

		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);

		t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}

	for (i = 0; i < NUM_DCR; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}

	return 0;
}

static int nfit_test1_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_nfit_system_address)
		+ sizeof(struct acpi_nfit_memory_map)
		+ sizeof(struct acpi_nfit_control_region);

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	return 0;
}
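
/*
 * Emit the SPA, MEMDEV, DCR, BDW and FLUSH entries that describe the
 * BUS0 topology documented at the top of this file.  When
 * t->setup_hotplug is set, a second pass appends the tables for the
 * hotplug DIMM behind cpu1.
 */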
static void nfit_test0_setup(struct nfit_test *t)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct acpi_nfit_memory_map *memdev;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_data_region *bdw;
	struct acpi_nfit_flush_address *flush;
	unsigned int offset;

	/*
	 * spa0 (interleave first half of dimm0 and dimm1, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA0_SIZE;

	/*
	 * spa1 (interleave last half of the 4 DIMMS, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 1+1;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA1_SIZE;

	/* spa2 (dcr0) dimm0 */
	spa = nfit_buf + sizeof(*spa) * 2;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 2+1;
	spa->address = t->dcr_dma[0];
	spa->length = DCR_SIZE;

	/* spa3 (dcr1) dimm1 */
	spa = nfit_buf + sizeof(*spa) * 3;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 3+1;
	spa->address = t->dcr_dma[1];
	spa->length = DCR_SIZE;

	/* spa4 (dcr2) dimm2 */
	spa = nfit_buf + sizeof(*spa) * 4;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 4+1;
	spa->address = t->dcr_dma[2];
	spa->length = DCR_SIZE;

	/* spa5 (dcr3) dimm3 */
	spa = nfit_buf + sizeof(*spa) * 5;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 5+1;
	spa->address = t->dcr_dma[3];
	spa->length = DCR_SIZE;

	/* spa6 (bdw for dcr0) dimm0 */
	spa = nfit_buf + sizeof(*spa) * 6;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 6+1;
	spa->address = t->dimm_dma[0];
	spa->length = DIMM_SIZE;

	/* spa7 (bdw for dcr1) dimm1 */
	spa = nfit_buf + sizeof(*spa) * 7;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 7+1;
	spa->address = t->dimm_dma[1];
	spa->length = DIMM_SIZE;

	/* spa8 (bdw for dcr2) dimm2 */
	spa = nfit_buf + sizeof(*spa) * 8;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 8+1;
	spa->address = t->dimm_dma[2];
	spa->length = DIMM_SIZE;

	/* spa9 (bdw for dcr3) dimm3 */
	spa = nfit_buf + sizeof(*spa) * 9;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 9+1;
	spa->address = t->dimm_dma[3];
	spa->length = DIMM_SIZE;

	offset = sizeof(*spa) * 10;
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0];
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region1 (spa0, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 1+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region2 (spa1, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1];
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region3 (spa1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 1+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region4 (spa1, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 2+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region5 (spa1, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 3+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region6 (spa/dcr0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 2+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region7 (spa/dcr1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 3+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region8 (spa/dcr2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 4+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region9 (spa/dcr3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 5+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region10 (spa/bdw0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 6+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region11 (spa/bdw1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 7+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region12 (spa/bdw2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 8+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region13 (spa/bdw3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 9+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 0+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[0];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor1 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 1+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[1];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor2 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 2+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[2];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor3 */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 3+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~handle[3];
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
	/* bdw0 (spa/dcr0, dimm0) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 0+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw1 (spa/dcr1, dimm1) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 1+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw2 (spa/dcr2, dimm2) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 2+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw3 (spa/dcr3, dimm3) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 3+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
	/* flush0 (dimm0) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[0];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[0];

	/* flush1 (dimm1) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[1];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[1];

	/* flush2 (dimm2) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[2];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[2];

	/* flush3 (dimm3) */
	flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = sizeof(struct acpi_nfit_flush_address);
	flush->device_handle = handle[3];
	flush->hint_count = 1;
	flush->hint_address[0] = t->flush_dma[3];

	if (t->setup_hotplug) {
		offset = offset + sizeof(struct acpi_nfit_flush_address) * 4;
		/* dcr-descriptor4 */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = sizeof(struct acpi_nfit_control_region);
		dcr->region_index = 4+1;
		dcr->vendor_id = 0xabcd;
		dcr->device_id = 0;
		dcr->revision_id = 1;
		dcr->serial_number = ~handle[4];
		dcr->windows = 1;
		dcr->window_size = DCR_SIZE;
		dcr->command_offset = 0;
		dcr->command_size = 8;
		dcr->status_offset = 8;
		dcr->status_size = 4;

		offset = offset + sizeof(struct acpi_nfit_control_region);
		/* bdw4 (spa/dcr4, dimm4) */
		bdw = nfit_buf + offset;
		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
		bdw->header.length = sizeof(struct acpi_nfit_data_region);
		bdw->region_index = 4+1;
		bdw->windows = 1;
		bdw->offset = 0;
		bdw->size = BDW_SIZE;
		bdw->capacity = DIMM_SIZE;
		bdw->start_address = 0;

		offset = offset + sizeof(struct acpi_nfit_data_region);
		/* spa10 (dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
		spa->range_index = 10+1;
		spa->address = t->dcr_dma[4];
		spa->length = DCR_SIZE;

		/*
		 * spa11 (single-dimm interleave for hotplug, note storage
		 * does not actually alias the related block-data-window
		 * regions)
		 */
		spa = nfit_buf + offset + sizeof(*spa);
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
		spa->range_index = 11+1;
		spa->address = t->spa_set_dma[2];
		spa->length = SPA0_SIZE;

		/* spa12 (bdw for dcr4) dimm4 */
		spa = nfit_buf + offset + sizeof(*spa) * 2;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
		spa->range_index = 12+1;
		spa->address = t->dimm_dma[4];
		spa->length = DIMM_SIZE;

		offset = offset + sizeof(*spa) * 3;
		/* mem-region14 (spa/dcr4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 10+1;
		memdev->region_index = 4+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;

		/* mem-region15 (spa11, dimm4) */
		memdev = nfit_buf + offset +
				sizeof(struct acpi_nfit_memory_map);
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 11+1;
		memdev->region_index = 4+1;
		memdev->region_size = SPA0_SIZE;
		memdev->region_offset = t->spa_set_dma[2];
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;

		/* mem-region16 (spa/bdw4, dimm4) */
		memdev = nfit_buf + offset +
				sizeof(struct acpi_nfit_memory_map) * 2;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 12+1;
		memdev->region_index = 4+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;

		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
		/* flush4 (dimm4) */
		flush = nfit_buf + offset;
		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
		flush->header.length = sizeof(struct acpi_nfit_flush_address);
		flush->device_handle = handle[4];
		flush->hint_count = 1;
		flush->hint_address[0] = t->flush_dma[4];
	}

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->ndctl = nfit_test_ctl;
}
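
/*
 * Emit the minimal table set for BUS1: one PMEM range backed by a single
 * DIMM with no BLK aliasing, with its memory-map entry flagged for
 * save/restore/flush failures and an unarmed state.
 */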
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;

	offset += sizeof(*spa);
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = 0;
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;

	offset += sizeof(*memdev);
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 0+1;
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->serial_number = ~0;
	dcr->code = 0x201;
	dcr->windows = 0;
	dcr->window_size = 0;
	dcr->command_offset = 0;
	dcr->command_size = 0;
	dcr->status_offset = 0;
	dcr->status_size = 0;

	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->ndctl = nfit_test_ctl;
}
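
/* service BLK region I/O by copying directly to/from the emulated aperture */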
static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
		void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = &ndbr->nd_region;
	unsigned int lane;

	lane = nd_region_acquire_lane(nd_region);
	if (rw)
		memcpy(mmio->addr.base + dpa, iobuf, len);
	else {
		memcpy(iobuf, mmio->addr.base + dpa, len);

		/* give us some coverage of the mmio_flush_range() API */
		mmio_flush_range(mmio->addr.base + dpa, len);
	}
	nd_region_release_lane(nd_region, lane);

	return 0;
}
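
/*
 * Instantiate one emulated bus: allocate the per-test buffers, generate
 * the NFIT, register an nvdimm bus, and hand the table to
 * acpi_nfit_init() (twice for bus 0, to exercise the hotplug path).
 */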
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	int rc;

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_desc->dev = &pdev->dev;
	acpi_desc->nfit = nfit_test->nfit_buf;
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;
	acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
	if (!acpi_desc->nvdimm_bus)
		return -ENXIO;

	INIT_LIST_HEAD(&acpi_desc->spa_maps);
	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	mutex_init(&acpi_desc->spa_map_mutex);
	mutex_init(&acpi_desc->init_mutex);

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}

	if (nfit_test->setup != nfit_test0_setup)
		return 0;

	nfit_test->setup_hotplug = 1;
	nfit_test->setup(nfit_test);

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
	if (rc) {
		nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
		return rc;
	}

	return 0;
}

static int nfit_test_remove(struct platform_device *pdev)
{
	struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
	struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);

	return 0;
}

static void nfit_test_release(struct device *dev)
{
	struct nfit_test *nfit_test = to_nfit_test(dev);

	kfree(nfit_test);
}

static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
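
/*
 * Register the address-lookup hook and both test platform devices.  The
 * one-time 128M coherent allocation only verifies that enough CMA is
 * available to back the emulated DIMM resources.
 */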
static __init int nfit_test_init(void)
{
	int rc, i;

	nfit_test_setup(nfit_test_lookup);

	for (i = 0; i < NUM_NFITS; i++) {
		struct nfit_test *nfit_test;
		struct platform_device *pdev;
		static int once;

		nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
		if (!nfit_test) {
			rc = -ENOMEM;
			goto err_register;
		}
		INIT_LIST_HEAD(&nfit_test->resources);
		switch (i) {
		case 0:
			nfit_test->num_pm = NUM_PM;
			nfit_test->num_dcr = NUM_DCR;
			nfit_test->alloc = nfit_test0_alloc;
			nfit_test->setup = nfit_test0_setup;
			break;
		case 1:
			nfit_test->num_pm = 1;
			nfit_test->alloc = nfit_test1_alloc;
			nfit_test->setup = nfit_test1_setup;
			break;
		default:
			rc = -EINVAL;
			goto err_register;
		}
		pdev = &nfit_test->pdev;
		pdev->name = KBUILD_MODNAME;
		pdev->id = i;
		pdev->dev.release = nfit_test_release;
		rc = platform_device_register(pdev);
		if (rc) {
			put_device(&pdev->dev);
			goto err_register;
		}

		rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err_register;

		instances[i] = nfit_test;

		if (!once++) {
			dma_addr_t dma;
			void *buf;

			buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
					GFP_KERNEL);
			if (!buf) {
				rc = -ENOMEM;
				dev_warn(&pdev->dev, "need 128M of free cma\n");
				goto err_register;
			}
			dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
		}
	}

	rc = platform_driver_register(&nfit_test_driver);
	if (rc)
		goto err_register;
	return 0;

 err_register:
	for (i = 0; i < NUM_NFITS; i++)
		if (instances[i])
			platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
	return rc;
}

static __exit void nfit_test_exit(void)
{
	int i;

	platform_driver_unregister(&nfit_test_driver);
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	nfit_test_teardown();
}

module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");