nfit.c

  1. /*
  2. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/platform_device.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/libnvdimm.h>
  17. #include <linux/vmalloc.h>
  18. #include <linux/device.h>
  19. #include <linux/module.h>
  20. #include <linux/mutex.h>
  21. #include <linux/ndctl.h>
  22. #include <linux/sizes.h>
  23. #include <linux/list.h>
  24. #include <linux/slab.h>
  25. #include <nfit.h>
  26. #include <nd.h>
  27. #include "nfit_test.h"
  28. /*
  29. * Generate an NFIT table to describe the following topology:
  30. *
  31. * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
  32. *
  33. *                              (a)                       (b)            DIMM   BLK-REGION
  34. *           +----------+--------------+----------+---------+
  35. * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
  36. * | imc0 +--+- - - - - region0 - - - -+----------+         +
  37. * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
  38. *    |      +----------+--------------v----------v         v
  39. * +--+---+                            |                    |
  40. * | cpu0 |                                    region1
  41. * +--+---+                            |                    |
  42. *    |      +-------------------------^----------^         ^
  43. * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
  44. * | imc1 +--+-------------------------+----------+         +
  45. * +------+  |                 blk5.0             |  pm1.0  |    3      region5
  46. *           +-------------------------+----------+-+-------+
  47. *
  48. *  +--+---+
  49. *  | cpu1 |
  50. *  +--+---+   (Hotplug DIMM)
  51. *     |      +----------------------------------------------+
  52. *  +--+---+  |                 blk6.0/pm7.0                  |    4      region6/7
  53. *  | imc0 +--+----------------------------------------------+
  54. *  +------+
  55. *
  56. *
  57. * *) In this layout we have four dimms and two memory controllers in one
  58. * socket. Each unique interface (BLK or PMEM) to DPA space
  59. * is identified by a region device with a dynamically assigned id.
  60. *
  61. * *) The first portion of dimm0 and dimm1 is interleaved as REGION0.
  62. * A single PMEM namespace "pm0.0" is created using half of the
  63. * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespaces
  64. * allocate from the bottom of a region. The unallocated
  65. * portion of REGION0 aliases with REGION2 and REGION3. That
  66. * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
  67. * "blk3.0") starting at the base of each DIMM to offset (a) in those
  68. * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
  69. * names that can be assigned to a namespace.
  70. *
  71. * *) In the last portion of dimm0 and dimm1 we have an interleaved
  72. * SPA range, REGION1, that spans those two dimms as well as dimm2
  73. * and dimm3. Some of REGION1 is allocated to a PMEM namespace named
  74. * "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
  75. * dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
  76. * "blk5.0".
  77. *
  78. * *) The portions of dimm2 and dimm3 that do not participate in the
  79. * REGION1 interleaved SPA range (i.e. the DPA addresses below offset
  80. * (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
  81. * Note that BLK namespaces need not be contiguous in DPA-space, and
  82. * can consume aliased capacity from multiple interleave sets.
  83. *
  84. * BUS1: Legacy NVDIMM (single contiguous range)
  85. *
  86. *  region2
  87. * +---------------------+
  88. * |---------------------|
  89. * ||       pm2.0       ||
  90. * |---------------------|
  91. * +---------------------+
  92. *
  93. * *) An NFIT table may describe a simple system-physical-address range
  94. * with no BLK aliasing. This type of region may optionally
  95. * reference an NVDIMM.
  96. */
  97. enum {
  98. NUM_PM = 3,
  99. NUM_DCR = 5,
  100. NUM_BDW = NUM_DCR,
  101. NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
  102. NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
  103. DIMM_SIZE = SZ_32M,
  104. LABEL_SIZE = SZ_128K,
  105. SPA0_SIZE = DIMM_SIZE,
  106. SPA1_SIZE = DIMM_SIZE*2,
  107. SPA2_SIZE = DIMM_SIZE,
  108. BDW_SIZE = 64 << 8,
  109. DCR_SIZE = 12,
  110. NUM_NFITS = 2, /* permit testing multiple NFITs per system */
  111. };
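/*
 * From the values above: BDW_SIZE (64 << 8 == 16K) is the size of each
 * simulated block aperture, and DCR_SIZE (12 bytes) matches the 8-byte
 * command register plus 4-byte status register laid out by the blk
 * dcr-descriptors programmed further down in this file.
 */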
  112. struct nfit_test_dcr {
  113. __le64 bdw_addr;
  114. __le32 bdw_status;
  115. __u8 aperture[BDW_SIZE];
  116. };
  117. #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
  118. (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
  119. | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
  120. static u32 handle[NUM_DCR] = {
  121. [0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
  122. [1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
  123. [2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
  124. [3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
  125. [4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
  126. };
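/*
 * Worked examples of the handle encoding above (straight arithmetic):
 * NFIT_DIMM_HANDLE(0, 0, 1, 0, 1) places imc in bits 11:8 and dimm in
 * bits 3:0, so handle[3] == 0x101, while NFIT_DIMM_HANDLE(0, 1, 0, 0, 0)
 * sets only the socket nibble, so handle[4] == 0x1000, the hotplug dimm
 * on socket 1.
 */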
  127. struct nfit_test {
  128. struct acpi_nfit_desc acpi_desc;
  129. struct platform_device pdev;
  130. struct list_head resources;
  131. void *nfit_buf;
  132. dma_addr_t nfit_dma;
  133. size_t nfit_size;
  134. int num_dcr;
  135. int num_pm;
  136. void **dimm;
  137. dma_addr_t *dimm_dma;
  138. void **flush;
  139. dma_addr_t *flush_dma;
  140. void **label;
  141. dma_addr_t *label_dma;
  142. void **spa_set;
  143. dma_addr_t *spa_set_dma;
  144. struct nfit_test_dcr **dcr;
  145. dma_addr_t *dcr_dma;
  146. int (*alloc)(struct nfit_test *t);
  147. void (*setup)(struct nfit_test *t);
  148. int setup_hotplug;
  149. struct ars_state {
  150. struct nd_cmd_ars_status *ars_status;
  151. unsigned long deadline;
  152. spinlock_t lock;
  153. } ars_state;
  154. };
  155. static struct nfit_test *to_nfit_test(struct device *dev)
  156. {
  157. struct platform_device *pdev = to_platform_device(dev);
  158. return container_of(pdev, struct nfit_test, pdev);
  159. }
  160. static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
  161. unsigned int buf_len)
  162. {
  163. if (buf_len < sizeof(*nd_cmd))
  164. return -EINVAL;
  165. nd_cmd->status = 0;
  166. nd_cmd->config_size = LABEL_SIZE;
  167. nd_cmd->max_xfer = SZ_4K;
  168. return 0;
  169. }
  170. static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
  171. *nd_cmd, unsigned int buf_len, void *label)
  172. {
  173. unsigned int len, offset = nd_cmd->in_offset;
  174. int rc;
  175. if (buf_len < sizeof(*nd_cmd))
  176. return -EINVAL;
  177. if (offset >= LABEL_SIZE)
  178. return -EINVAL;
  179. if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
  180. return -EINVAL;
  181. nd_cmd->status = 0;
  182. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  183. memcpy(nd_cmd->out_buf, label + offset, len);
  184. rc = buf_len - sizeof(*nd_cmd) - len;
  185. return rc;
  186. }
  187. static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
  188. unsigned int buf_len, void *label)
  189. {
  190. unsigned int len, offset = nd_cmd->in_offset;
  191. u32 *status;
  192. int rc;
  193. if (buf_len < sizeof(*nd_cmd))
  194. return -EINVAL;
  195. if (offset >= LABEL_SIZE)
  196. return -EINVAL;
  197. if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
  198. return -EINVAL;
  199. status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
  200. *status = 0;
  201. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  202. memcpy(label + offset, nd_cmd->in_buf, len);
  203. rc = buf_len - sizeof(*nd_cmd) - (len + 4);
  204. return rc;
  205. }
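/*
 * Note on the two label accessors above: both clamp the transfer to the
 * LABEL_SIZE-byte label area and return the count of trailing bytes in
 * the command buffer that were not consumed, so a return of 0 indicates
 * the buffer was sized exactly for the requested transfer.
 */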
  206. #define NFIT_TEST_ARS_RECORDS 4
  207. #define NFIT_TEST_CLEAR_ERR_UNIT 256
  208. static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
  209. unsigned int buf_len)
  210. {
  211. if (buf_len < sizeof(*nd_cmd))
  212. return -EINVAL;
  213. nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
  214. + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
  215. nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
  216. nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
  217. return 0;
  218. }
  219. /*
  220. * Initialize the ars_state to return an ars_result 1 second in the future with
  221. * a 4K error range in the middle of the requested address range.
  222. */
  223. static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
  224. {
  225. struct nd_cmd_ars_status *ars_status;
  226. struct nd_ars_record *ars_record;
  227. ars_state->deadline = jiffies + 1*HZ;
  228. ars_status = ars_state->ars_status;
  229. ars_status->status = 0;
  230. ars_status->out_length = sizeof(struct nd_cmd_ars_status)
  231. + sizeof(struct nd_ars_record);
  232. ars_status->address = addr;
  233. ars_status->length = len;
  234. ars_status->type = ND_ARS_PERSISTENT;
  235. ars_status->num_records = 1;
  236. ars_record = &ars_status->records[0];
  237. ars_record->handle = 0;
  238. ars_record->err_address = addr + len / 2;
  239. ars_record->length = SZ_4K;
  240. }
  241. static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
  242. struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
  243. int *cmd_rc)
  244. {
  245. if (buf_len < sizeof(*ars_start))
  246. return -EINVAL;
  247. spin_lock(&ars_state->lock);
  248. if (time_before(jiffies, ars_state->deadline)) {
  249. ars_start->status = NFIT_ARS_START_BUSY;
  250. *cmd_rc = -EBUSY;
  251. } else {
  252. ars_start->status = 0;
  253. ars_start->scrub_time = 1;
  254. post_ars_status(ars_state, ars_start->address,
  255. ars_start->length);
  256. *cmd_rc = 0;
  257. }
  258. spin_unlock(&ars_state->lock);
  259. return 0;
  260. }
  261. static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
  262. struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
  263. int *cmd_rc)
  264. {
  265. if (buf_len < ars_state->ars_status->out_length)
  266. return -EINVAL;
  267. spin_lock(&ars_state->lock);
  268. if (time_before(jiffies, ars_state->deadline)) {
  269. memset(ars_status, 0, buf_len);
  270. ars_status->status = NFIT_ARS_STATUS_BUSY;
  271. ars_status->out_length = sizeof(*ars_status);
  272. *cmd_rc = -EBUSY;
  273. } else {
  274. memcpy(ars_status, ars_state->ars_status,
  275. ars_state->ars_status->out_length);
  276. *cmd_rc = 0;
  277. }
  278. spin_unlock(&ars_state->lock);
  279. return 0;
  280. }
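/*
 * Taken together, the handlers above simulate a scrub that takes about
 * one second: a successful ars_start re-seeds the result and restarts
 * the deadline via post_ars_status(); until that deadline passes both
 * ars_start and ars_status report busy, after which ars_status returns
 * the single 4K error record seeded in the middle of the scrubbed range.
 */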
  281. static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
  282. unsigned int buf_len, int *cmd_rc)
  283. {
  284. const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
  285. if (buf_len < sizeof(*clear_err))
  286. return -EINVAL;
  287. if ((clear_err->address & mask) || (clear_err->length & mask))
  288. return -EINVAL;
  289. /*
  290. * Report 'all clear' success for all commands even though a new
  291. * scrub will find errors again. This is enough to have the
  292. * error removed from the 'badblocks' tracking in the pmem
  293. * driver.
  294. */
  295. clear_err->status = 0;
  296. clear_err->cleared = clear_err->length;
  297. *cmd_rc = 0;
  298. return 0;
  299. }
  300. static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
  301. struct nvdimm *nvdimm, unsigned int cmd, void *buf,
  302. unsigned int buf_len, int *cmd_rc)
  303. {
  304. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  305. struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
  306. int i, rc = 0, __cmd_rc;
  307. if (!cmd_rc)
  308. cmd_rc = &__cmd_rc;
  309. *cmd_rc = 0;
  310. if (nvdimm) {
  311. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  312. if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
  313. return -ENOTTY;
  314. /* lookup label space for the given dimm */
  315. for (i = 0; i < ARRAY_SIZE(handle); i++)
  316. if (__to_nfit_memdev(nfit_mem)->device_handle ==
  317. handle[i])
  318. break;
  319. if (i >= ARRAY_SIZE(handle))
  320. return -ENXIO;
  321. switch (cmd) {
  322. case ND_CMD_GET_CONFIG_SIZE:
  323. rc = nfit_test_cmd_get_config_size(buf, buf_len);
  324. break;
  325. case ND_CMD_GET_CONFIG_DATA:
  326. rc = nfit_test_cmd_get_config_data(buf, buf_len,
  327. t->label[i]);
  328. break;
  329. case ND_CMD_SET_CONFIG_DATA:
  330. rc = nfit_test_cmd_set_config_data(buf, buf_len,
  331. t->label[i]);
  332. break;
  333. default:
  334. return -ENOTTY;
  335. }
  336. } else {
  337. struct ars_state *ars_state = &t->ars_state;
  338. if (!nd_desc || !test_bit(cmd, &nd_desc->dsm_mask))
  339. return -ENOTTY;
  340. switch (cmd) {
  341. case ND_CMD_ARS_CAP:
  342. rc = nfit_test_cmd_ars_cap(buf, buf_len);
  343. break;
  344. case ND_CMD_ARS_START:
  345. rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
  346. cmd_rc);
  347. break;
  348. case ND_CMD_ARS_STATUS:
  349. rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
  350. cmd_rc);
  351. break;
  352. case ND_CMD_CLEAR_ERROR:
  353. rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
  354. break;
  355. default:
  356. return -ENOTTY;
  357. }
  358. }
  359. return rc;
  360. }
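/*
 * Dispatch summary for nfit_test_ctl(): dimm-scoped commands are gated
 * by nfit_mem->dsm_mask and routed to the per-dimm label area selected
 * by matching the device handle, while bus-scoped commands (ARS and
 * clear-error) are gated by nd_desc->dsm_mask, with the ARS commands
 * sharing the per-instance ars_state.
 */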
  361. static DEFINE_SPINLOCK(nfit_test_lock);
  362. static struct nfit_test *instances[NUM_NFITS];
  363. static void release_nfit_res(void *data)
  364. {
  365. struct nfit_test_resource *nfit_res = data;
  366. struct resource *res = nfit_res->res;
  367. spin_lock(&nfit_test_lock);
  368. list_del(&nfit_res->list);
  369. spin_unlock(&nfit_test_lock);
  370. if (is_vmalloc_addr(nfit_res->buf))
  371. vfree(nfit_res->buf);
  372. else
  373. dma_free_coherent(nfit_res->dev, resource_size(res),
  374. nfit_res->buf, res->start);
  375. kfree(res);
  376. kfree(nfit_res);
  377. }
  378. static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
  379. void *buf)
  380. {
  381. struct device *dev = &t->pdev.dev;
  382. struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
  383. struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
  384. GFP_KERNEL);
  385. int rc;
  386. if (!res || !buf || !nfit_res)
  387. goto err;
  388. rc = devm_add_action(dev, release_nfit_res, nfit_res);
  389. if (rc)
  390. goto err;
  391. INIT_LIST_HEAD(&nfit_res->list);
  392. memset(buf, 0, size);
  393. nfit_res->dev = dev;
  394. nfit_res->buf = buf;
  395. nfit_res->res = res;
  396. res->start = *dma;
  397. res->end = *dma + size - 1;
  398. res->name = "NFIT";
  399. spin_lock(&nfit_test_lock);
  400. list_add(&nfit_res->list, &t->resources);
  401. spin_unlock(&nfit_test_lock);
  402. return nfit_res->buf;
  403. err:
  404. if (buf && !is_vmalloc_addr(buf))
  405. dma_free_coherent(dev, size, buf, *dma);
  406. else if (buf)
  407. vfree(buf);
  408. kfree(res);
  409. kfree(nfit_res);
  410. return NULL;
  411. }
  412. static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
  413. {
  414. void *buf = vmalloc(size);
  415. *dma = (unsigned long) buf;
  416. return __test_alloc(t, size, dma, buf);
  417. }
  418. static void *test_alloc_coherent(struct nfit_test *t, size_t size,
  419. dma_addr_t *dma)
  420. {
  421. struct device *dev = &t->pdev.dev;
  422. void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
  423. return __test_alloc(t, size, dma, buf);
  424. }
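/*
 * Allocation scheme: test_alloc() backs a resource with vmalloc() and
 * reuses the kernel virtual address as the fake "physical" address,
 * while test_alloc_coherent() hands back a real DMA address. Either
 * way __test_alloc() records the range on t->resources so that
 * nfit_test_lookup() below can translate addresses the nfit/nvdimm
 * core treats as SPA back to the backing buffer.
 */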
  425. static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
  426. {
  427. int i;
  428. for (i = 0; i < ARRAY_SIZE(instances); i++) {
  429. struct nfit_test_resource *n, *nfit_res = NULL;
  430. struct nfit_test *t = instances[i];
  431. if (!t)
  432. continue;
  433. spin_lock(&nfit_test_lock);
  434. list_for_each_entry(n, &t->resources, list) {
  435. if (addr >= n->res->start && (addr < n->res->start
  436. + resource_size(n->res))) {
  437. nfit_res = n;
  438. break;
  439. } else if (addr >= (unsigned long) n->buf
  440. && (addr < (unsigned long) n->buf
  441. + resource_size(n->res))) {
  442. nfit_res = n;
  443. break;
  444. }
  445. }
  446. spin_unlock(&nfit_test_lock);
  447. if (nfit_res)
  448. return nfit_res;
  449. }
  450. return NULL;
  451. }
  452. static int ars_state_init(struct device *dev, struct ars_state *ars_state)
  453. {
  454. ars_state->ars_status = devm_kzalloc(dev,
  455. sizeof(struct nd_cmd_ars_status)
  456. + sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
  457. GFP_KERNEL);
  458. if (!ars_state->ars_status)
  459. return -ENOMEM;
  460. spin_lock_init(&ars_state->lock);
  461. return 0;
  462. }
  463. static int nfit_test0_alloc(struct nfit_test *t)
  464. {
  465. size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
  466. + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
  467. + sizeof(struct acpi_nfit_control_region) * NUM_DCR
  468. + offsetof(struct acpi_nfit_control_region,
  469. window_size) * NUM_DCR
  470. + sizeof(struct acpi_nfit_data_region) * NUM_BDW
  471. + sizeof(struct acpi_nfit_flush_address) * NUM_DCR;
  472. int i;
  473. t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
  474. if (!t->nfit_buf)
  475. return -ENOMEM;
  476. t->nfit_size = nfit_size;
  477. t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
  478. if (!t->spa_set[0])
  479. return -ENOMEM;
  480. t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
  481. if (!t->spa_set[1])
  482. return -ENOMEM;
  483. t->spa_set[2] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[2]);
  484. if (!t->spa_set[2])
  485. return -ENOMEM;
  486. for (i = 0; i < NUM_DCR; i++) {
  487. t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
  488. if (!t->dimm[i])
  489. return -ENOMEM;
  490. t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
  491. if (!t->label[i])
  492. return -ENOMEM;
  493. sprintf(t->label[i], "label%d", i);
  494. t->flush[i] = test_alloc(t, 8, &t->flush_dma[i]);
  495. if (!t->flush[i])
  496. return -ENOMEM;
  497. }
  498. for (i = 0; i < NUM_DCR; i++) {
  499. t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
  500. if (!t->dcr[i])
  501. return -ENOMEM;
  502. }
  503. return ars_state_init(&t->pdev.dev, &t->ars_state);
  504. }
  505. static int nfit_test1_alloc(struct nfit_test *t)
  506. {
  507. size_t nfit_size = sizeof(struct acpi_nfit_system_address)
  508. + sizeof(struct acpi_nfit_memory_map)
  509. + offsetof(struct acpi_nfit_control_region, window_size);
  510. t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
  511. if (!t->nfit_buf)
  512. return -ENOMEM;
  513. t->nfit_size = nfit_size;
  514. t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
  515. if (!t->spa_set[0])
  516. return -ENOMEM;
  517. return ars_state_init(&t->pdev.dev, &t->ars_state);
  518. }
  519. static void nfit_test0_setup(struct nfit_test *t)
  520. {
  521. struct acpi_nfit_desc *acpi_desc;
  522. struct acpi_nfit_memory_map *memdev;
  523. void *nfit_buf = t->nfit_buf;
  524. struct acpi_nfit_system_address *spa;
  525. struct acpi_nfit_control_region *dcr;
  526. struct acpi_nfit_data_region *bdw;
  527. struct acpi_nfit_flush_address *flush;
  528. unsigned int offset;
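/*
 * The table is packed back to back in nfit_buf: ten spa ranges,
 * fourteen memory-map entries, four blk and four pmem control regions,
 * four block-data-windows, and four flush-hint entries, with the
 * hotplug dimm's entries appended at the end when setup_hotplug is set.
 */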
  529. /*
  530. * spa0 (interleave first half of dimm0 and dimm1, note storage
  531. * does not actually alias the related block-data-window
  532. * regions)
  533. */
  534. spa = nfit_buf;
  535. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  536. spa->header.length = sizeof(*spa);
  537. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  538. spa->range_index = 0+1;
  539. spa->address = t->spa_set_dma[0];
  540. spa->length = SPA0_SIZE;
  541. /*
  542. * spa1 (interleave last half of the 4 DIMMS, note storage
  543. * does not actually alias the related block-data-window
  544. * regions)
  545. */
  546. spa = nfit_buf + sizeof(*spa);
  547. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  548. spa->header.length = sizeof(*spa);
  549. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  550. spa->range_index = 1+1;
  551. spa->address = t->spa_set_dma[1];
  552. spa->length = SPA1_SIZE;
  553. /* spa2 (dcr0) dimm0 */
  554. spa = nfit_buf + sizeof(*spa) * 2;
  555. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  556. spa->header.length = sizeof(*spa);
  557. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  558. spa->range_index = 2+1;
  559. spa->address = t->dcr_dma[0];
  560. spa->length = DCR_SIZE;
  561. /* spa3 (dcr1) dimm1 */
  562. spa = nfit_buf + sizeof(*spa) * 3;
  563. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  564. spa->header.length = sizeof(*spa);
  565. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  566. spa->range_index = 3+1;
  567. spa->address = t->dcr_dma[1];
  568. spa->length = DCR_SIZE;
  569. /* spa4 (dcr2) dimm2 */
  570. spa = nfit_buf + sizeof(*spa) * 4;
  571. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  572. spa->header.length = sizeof(*spa);
  573. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  574. spa->range_index = 4+1;
  575. spa->address = t->dcr_dma[2];
  576. spa->length = DCR_SIZE;
  577. /* spa5 (dcr3) dimm3 */
  578. spa = nfit_buf + sizeof(*spa) * 5;
  579. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  580. spa->header.length = sizeof(*spa);
  581. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  582. spa->range_index = 5+1;
  583. spa->address = t->dcr_dma[3];
  584. spa->length = DCR_SIZE;
  585. /* spa6 (bdw for dcr0) dimm0 */
  586. spa = nfit_buf + sizeof(*spa) * 6;
  587. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  588. spa->header.length = sizeof(*spa);
  589. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  590. spa->range_index = 6+1;
  591. spa->address = t->dimm_dma[0];
  592. spa->length = DIMM_SIZE;
  593. /* spa7 (bdw for dcr1) dimm1 */
  594. spa = nfit_buf + sizeof(*spa) * 7;
  595. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  596. spa->header.length = sizeof(*spa);
  597. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  598. spa->range_index = 7+1;
  599. spa->address = t->dimm_dma[1];
  600. spa->length = DIMM_SIZE;
  601. /* spa8 (bdw for dcr2) dimm2 */
  602. spa = nfit_buf + sizeof(*spa) * 8;
  603. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  604. spa->header.length = sizeof(*spa);
  605. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  606. spa->range_index = 8+1;
  607. spa->address = t->dimm_dma[2];
  608. spa->length = DIMM_SIZE;
  609. /* spa9 (bdw for dcr3) dimm3 */
  610. spa = nfit_buf + sizeof(*spa) * 9;
  611. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  612. spa->header.length = sizeof(*spa);
  613. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  614. spa->range_index = 9+1;
  615. spa->address = t->dimm_dma[3];
  616. spa->length = DIMM_SIZE;
  617. offset = sizeof(*spa) * 10;
  618. /* mem-region0 (spa0, dimm0) */
  619. memdev = nfit_buf + offset;
  620. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  621. memdev->header.length = sizeof(*memdev);
  622. memdev->device_handle = handle[0];
  623. memdev->physical_id = 0;
  624. memdev->region_id = 0;
  625. memdev->range_index = 0+1;
  626. memdev->region_index = 4+1;
  627. memdev->region_size = SPA0_SIZE/2;
  628. memdev->region_offset = t->spa_set_dma[0];
  629. memdev->address = 0;
  630. memdev->interleave_index = 0;
  631. memdev->interleave_ways = 2;
  632. /* mem-region1 (spa0, dimm1) */
  633. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
  634. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  635. memdev->header.length = sizeof(*memdev);
  636. memdev->device_handle = handle[1];
  637. memdev->physical_id = 1;
  638. memdev->region_id = 0;
  639. memdev->range_index = 0+1;
  640. memdev->region_index = 5+1;
  641. memdev->region_size = SPA0_SIZE/2;
  642. memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
  643. memdev->address = 0;
  644. memdev->interleave_index = 0;
  645. memdev->interleave_ways = 2;
  646. /* mem-region2 (spa1, dimm0) */
  647. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
  648. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  649. memdev->header.length = sizeof(*memdev);
  650. memdev->device_handle = handle[0];
  651. memdev->physical_id = 0;
  652. memdev->region_id = 1;
  653. memdev->range_index = 1+1;
  654. memdev->region_index = 4+1;
  655. memdev->region_size = SPA1_SIZE/4;
  656. memdev->region_offset = t->spa_set_dma[1];
  657. memdev->address = SPA0_SIZE/2;
  658. memdev->interleave_index = 0;
  659. memdev->interleave_ways = 4;
  660. /* mem-region3 (spa1, dimm1) */
  661. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
  662. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  663. memdev->header.length = sizeof(*memdev);
  664. memdev->device_handle = handle[1];
  665. memdev->physical_id = 1;
  666. memdev->region_id = 1;
  667. memdev->range_index = 1+1;
  668. memdev->region_index = 5+1;
  669. memdev->region_size = SPA1_SIZE/4;
  670. memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
  671. memdev->address = SPA0_SIZE/2;
  672. memdev->interleave_index = 0;
  673. memdev->interleave_ways = 4;
  674. /* mem-region4 (spa1, dimm2) */
  675. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
  676. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  677. memdev->header.length = sizeof(*memdev);
  678. memdev->device_handle = handle[2];
  679. memdev->physical_id = 2;
  680. memdev->region_id = 0;
  681. memdev->range_index = 1+1;
  682. memdev->region_index = 6+1;
  683. memdev->region_size = SPA1_SIZE/4;
  684. memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
  685. memdev->address = SPA0_SIZE/2;
  686. memdev->interleave_index = 0;
  687. memdev->interleave_ways = 4;
  688. /* mem-region5 (spa1, dimm3) */
  689. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
  690. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  691. memdev->header.length = sizeof(*memdev);
  692. memdev->device_handle = handle[3];
  693. memdev->physical_id = 3;
  694. memdev->region_id = 0;
  695. memdev->range_index = 1+1;
  696. memdev->region_index = 7+1;
  697. memdev->region_size = SPA1_SIZE/4;
  698. memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
  699. memdev->address = SPA0_SIZE/2;
  700. memdev->interleave_index = 0;
  701. memdev->interleave_ways = 4;
  702. /* mem-region6 (spa/dcr0, dimm0) */
  703. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
  704. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  705. memdev->header.length = sizeof(*memdev);
  706. memdev->device_handle = handle[0];
  707. memdev->physical_id = 0;
  708. memdev->region_id = 0;
  709. memdev->range_index = 2+1;
  710. memdev->region_index = 0+1;
  711. memdev->region_size = 0;
  712. memdev->region_offset = 0;
  713. memdev->address = 0;
  714. memdev->interleave_index = 0;
  715. memdev->interleave_ways = 1;
  716. /* mem-region7 (spa/dcr1, dimm1) */
  717. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
  718. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  719. memdev->header.length = sizeof(*memdev);
  720. memdev->device_handle = handle[1];
  721. memdev->physical_id = 1;
  722. memdev->region_id = 0;
  723. memdev->range_index = 3+1;
  724. memdev->region_index = 1+1;
  725. memdev->region_size = 0;
  726. memdev->region_offset = 0;
  727. memdev->address = 0;
  728. memdev->interleave_index = 0;
  729. memdev->interleave_ways = 1;
  730. /* mem-region8 (spa/dcr2, dimm2) */
  731. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
  732. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  733. memdev->header.length = sizeof(*memdev);
  734. memdev->device_handle = handle[2];
  735. memdev->physical_id = 2;
  736. memdev->region_id = 0;
  737. memdev->range_index = 4+1;
  738. memdev->region_index = 2+1;
  739. memdev->region_size = 0;
  740. memdev->region_offset = 0;
  741. memdev->address = 0;
  742. memdev->interleave_index = 0;
  743. memdev->interleave_ways = 1;
  744. /* mem-region9 (spa/dcr3, dimm3) */
  745. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
  746. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  747. memdev->header.length = sizeof(*memdev);
  748. memdev->device_handle = handle[3];
  749. memdev->physical_id = 3;
  750. memdev->region_id = 0;
  751. memdev->range_index = 5+1;
  752. memdev->region_index = 3+1;
  753. memdev->region_size = 0;
  754. memdev->region_offset = 0;
  755. memdev->address = 0;
  756. memdev->interleave_index = 0;
  757. memdev->interleave_ways = 1;
  758. /* mem-region10 (spa/bdw0, dimm0) */
  759. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
  760. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  761. memdev->header.length = sizeof(*memdev);
  762. memdev->device_handle = handle[0];
  763. memdev->physical_id = 0;
  764. memdev->region_id = 0;
  765. memdev->range_index = 6+1;
  766. memdev->region_index = 0+1;
  767. memdev->region_size = 0;
  768. memdev->region_offset = 0;
  769. memdev->address = 0;
  770. memdev->interleave_index = 0;
  771. memdev->interleave_ways = 1;
  772. /* mem-region11 (spa/bdw1, dimm1) */
  773. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
  774. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  775. memdev->header.length = sizeof(*memdev);
  776. memdev->device_handle = handle[1];
  777. memdev->physical_id = 1;
  778. memdev->region_id = 0;
  779. memdev->range_index = 7+1;
  780. memdev->region_index = 1+1;
  781. memdev->region_size = 0;
  782. memdev->region_offset = 0;
  783. memdev->address = 0;
  784. memdev->interleave_index = 0;
  785. memdev->interleave_ways = 1;
  786. /* mem-region12 (spa/bdw2, dimm2) */
  787. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
  788. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  789. memdev->header.length = sizeof(*memdev);
  790. memdev->device_handle = handle[2];
  791. memdev->physical_id = 2;
  792. memdev->region_id = 0;
  793. memdev->range_index = 8+1;
  794. memdev->region_index = 2+1;
  795. memdev->region_size = 0;
  796. memdev->region_offset = 0;
  797. memdev->address = 0;
  798. memdev->interleave_index = 0;
  799. memdev->interleave_ways = 1;
  800. /* mem-region13 (spa/bdw3, dimm3) */
  801. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
  802. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  803. memdev->header.length = sizeof(*memdev);
  804. memdev->device_handle = handle[3];
  805. memdev->physical_id = 3;
  806. memdev->region_id = 0;
  807. memdev->range_index = 9+1;
  808. memdev->region_index = 3+1;
  809. memdev->region_size = 0;
  810. memdev->region_offset = 0;
  811. memdev->address = 0;
  812. memdev->interleave_index = 0;
  813. memdev->interleave_ways = 1;
  814. offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
  815. /* dcr-descriptor0: blk */
  816. dcr = nfit_buf + offset;
  817. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  818. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  819. dcr->region_index = 0+1;
  820. dcr->vendor_id = 0xabcd;
  821. dcr->device_id = 0;
  822. dcr->revision_id = 1;
  823. dcr->serial_number = ~handle[0];
  824. dcr->code = NFIT_FIC_BLK;
  825. dcr->windows = 1;
  826. dcr->window_size = DCR_SIZE;
  827. dcr->command_offset = 0;
  828. dcr->command_size = 8;
  829. dcr->status_offset = 8;
  830. dcr->status_size = 4;
  831. /* dcr-descriptor1: blk */
  832. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
  833. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  834. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  835. dcr->region_index = 1+1;
  836. dcr->vendor_id = 0xabcd;
  837. dcr->device_id = 0;
  838. dcr->revision_id = 1;
  839. dcr->serial_number = ~handle[1];
  840. dcr->code = NFIT_FIC_BLK;
  841. dcr->windows = 1;
  842. dcr->window_size = DCR_SIZE;
  843. dcr->command_offset = 0;
  844. dcr->command_size = 8;
  845. dcr->status_offset = 8;
  846. dcr->status_size = 4;
  847. /* dcr-descriptor2: blk */
  848. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
  849. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  850. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  851. dcr->region_index = 2+1;
  852. dcr->vendor_id = 0xabcd;
  853. dcr->device_id = 0;
  854. dcr->revision_id = 1;
  855. dcr->serial_number = ~handle[2];
  856. dcr->code = NFIT_FIC_BLK;
  857. dcr->windows = 1;
  858. dcr->window_size = DCR_SIZE;
  859. dcr->command_offset = 0;
  860. dcr->command_size = 8;
  861. dcr->status_offset = 8;
  862. dcr->status_size = 4;
  863. /* dcr-descriptor3: blk */
  864. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
  865. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  866. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  867. dcr->region_index = 3+1;
  868. dcr->vendor_id = 0xabcd;
  869. dcr->device_id = 0;
  870. dcr->revision_id = 1;
  871. dcr->serial_number = ~handle[3];
  872. dcr->code = NFIT_FIC_BLK;
  873. dcr->windows = 1;
  874. dcr->window_size = DCR_SIZE;
  875. dcr->command_offset = 0;
  876. dcr->command_size = 8;
  877. dcr->status_offset = 8;
  878. dcr->status_size = 4;
  879. offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
  880. /* dcr-descriptor0: pmem */
  881. dcr = nfit_buf + offset;
  882. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  883. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  884. window_size);
  885. dcr->region_index = 4+1;
  886. dcr->vendor_id = 0xabcd;
  887. dcr->device_id = 0;
  888. dcr->revision_id = 1;
  889. dcr->serial_number = ~handle[0];
  890. dcr->code = NFIT_FIC_BYTEN;
  891. dcr->windows = 0;
  892. /* dcr-descriptor1: pmem */
  893. dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
  894. window_size);
  895. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  896. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  897. window_size);
  898. dcr->region_index = 5+1;
  899. dcr->vendor_id = 0xabcd;
  900. dcr->device_id = 0;
  901. dcr->revision_id = 1;
  902. dcr->serial_number = ~handle[1];
  903. dcr->code = NFIT_FIC_BYTEN;
  904. dcr->windows = 0;
  905. /* dcr-descriptor2: pmem */
  906. dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
  907. window_size) * 2;
  908. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  909. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  910. window_size);
  911. dcr->region_index = 6+1;
  912. dcr->vendor_id = 0xabcd;
  913. dcr->device_id = 0;
  914. dcr->revision_id = 1;
  915. dcr->serial_number = ~handle[2];
  916. dcr->code = NFIT_FIC_BYTEN;
  917. dcr->windows = 0;
  918. /* dcr-descriptor3: pmem */
  919. dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
  920. window_size) * 3;
  921. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  922. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  923. window_size);
  924. dcr->region_index = 7+1;
  925. dcr->vendor_id = 0xabcd;
  926. dcr->device_id = 0;
  927. dcr->revision_id = 1;
  928. dcr->serial_number = ~handle[3];
  929. dcr->code = NFIT_FIC_BYTEN;
  930. dcr->windows = 0;
  931. offset = offset + offsetof(struct acpi_nfit_control_region,
  932. window_size) * 4;
  933. /* bdw0 (spa/dcr0, dimm0) */
  934. bdw = nfit_buf + offset;
  935. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  936. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  937. bdw->region_index = 0+1;
  938. bdw->windows = 1;
  939. bdw->offset = 0;
  940. bdw->size = BDW_SIZE;
  941. bdw->capacity = DIMM_SIZE;
  942. bdw->start_address = 0;
  943. /* bdw1 (spa/dcr1, dimm1) */
  944. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
  945. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  946. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  947. bdw->region_index = 1+1;
  948. bdw->windows = 1;
  949. bdw->offset = 0;
  950. bdw->size = BDW_SIZE;
  951. bdw->capacity = DIMM_SIZE;
  952. bdw->start_address = 0;
  953. /* bdw2 (spa/dcr2, dimm2) */
  954. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
  955. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  956. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  957. bdw->region_index = 2+1;
  958. bdw->windows = 1;
  959. bdw->offset = 0;
  960. bdw->size = BDW_SIZE;
  961. bdw->capacity = DIMM_SIZE;
  962. bdw->start_address = 0;
  963. /* bdw3 (spa/dcr3, dimm3) */
  964. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
  965. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  966. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  967. bdw->region_index = 3+1;
  968. bdw->windows = 1;
  969. bdw->offset = 0;
  970. bdw->size = BDW_SIZE;
  971. bdw->capacity = DIMM_SIZE;
  972. bdw->start_address = 0;
  973. offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
  974. /* flush0 (dimm0) */
  975. flush = nfit_buf + offset;
  976. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  977. flush->header.length = sizeof(struct acpi_nfit_flush_address);
  978. flush->device_handle = handle[0];
  979. flush->hint_count = 1;
  980. flush->hint_address[0] = t->flush_dma[0];
  981. /* flush1 (dimm1) */
  982. flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 1;
  983. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  984. flush->header.length = sizeof(struct acpi_nfit_flush_address);
  985. flush->device_handle = handle[1];
  986. flush->hint_count = 1;
  987. flush->hint_address[0] = t->flush_dma[1];
  988. /* flush2 (dimm2) */
  989. flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 2;
  990. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  991. flush->header.length = sizeof(struct acpi_nfit_flush_address);
  992. flush->device_handle = handle[2];
  993. flush->hint_count = 1;
  994. flush->hint_address[0] = t->flush_dma[2];
  995. /* flush3 (dimm3) */
  996. flush = nfit_buf + offset + sizeof(struct acpi_nfit_flush_address) * 3;
  997. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  998. flush->header.length = sizeof(struct acpi_nfit_flush_address);
  999. flush->device_handle = handle[3];
  1000. flush->hint_count = 1;
  1001. flush->hint_address[0] = t->flush_dma[3];
  1002. if (t->setup_hotplug) {
  1003. offset = offset + sizeof(struct acpi_nfit_flush_address) * 4;
  1004. /* dcr-descriptor4: blk */
  1005. dcr = nfit_buf + offset;
  1006. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1007. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  1008. dcr->region_index = 8+1;
  1009. dcr->vendor_id = 0xabcd;
  1010. dcr->device_id = 0;
  1011. dcr->revision_id = 1;
  1012. dcr->serial_number = ~handle[4];
  1013. dcr->code = NFIT_FIC_BLK;
  1014. dcr->windows = 1;
  1015. dcr->window_size = DCR_SIZE;
  1016. dcr->command_offset = 0;
  1017. dcr->command_size = 8;
  1018. dcr->status_offset = 8;
  1019. dcr->status_size = 4;
  1020. offset = offset + sizeof(struct acpi_nfit_control_region);
  1021. /* dcr-descriptor4: pmem */
  1022. dcr = nfit_buf + offset;
  1023. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1024. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1025. window_size);
  1026. dcr->region_index = 9+1;
  1027. dcr->vendor_id = 0xabcd;
  1028. dcr->device_id = 0;
  1029. dcr->revision_id = 1;
  1030. dcr->serial_number = ~handle[4];
  1031. dcr->code = NFIT_FIC_BYTEN;
  1032. dcr->windows = 0;
  1033. offset = offset + offsetof(struct acpi_nfit_control_region,
  1034. window_size);
  1035. /* bdw4 (spa/dcr4, dimm4) */
  1036. bdw = nfit_buf + offset;
  1037. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1038. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  1039. bdw->region_index = 8+1;
  1040. bdw->windows = 1;
  1041. bdw->offset = 0;
  1042. bdw->size = BDW_SIZE;
  1043. bdw->capacity = DIMM_SIZE;
  1044. bdw->start_address = 0;
  1045. offset = offset + sizeof(struct acpi_nfit_data_region);
  1046. /* spa10 (dcr4) dimm4 */
  1047. spa = nfit_buf + offset;
  1048. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1049. spa->header.length = sizeof(*spa);
  1050. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1051. spa->range_index = 10+1;
  1052. spa->address = t->dcr_dma[4];
  1053. spa->length = DCR_SIZE;
  1054. /*
  1055. * spa11 (single-dimm interleave for hotplug, note storage
  1056. * does not actually alias the related block-data-window
  1057. * regions)
  1058. */
  1059. spa = nfit_buf + offset + sizeof(*spa);
  1060. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1061. spa->header.length = sizeof(*spa);
  1062. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  1063. spa->range_index = 11+1;
  1064. spa->address = t->spa_set_dma[2];
  1065. spa->length = SPA0_SIZE;
  1066. /* spa12 (bdw for dcr4) dimm4 */
  1067. spa = nfit_buf + offset + sizeof(*spa) * 2;
  1068. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1069. spa->header.length = sizeof(*spa);
  1070. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1071. spa->range_index = 12+1;
  1072. spa->address = t->dimm_dma[4];
  1073. spa->length = DIMM_SIZE;
  1074. offset = offset + sizeof(*spa) * 3;
  1075. /* mem-region14 (spa/dcr4, dimm4) */
  1076. memdev = nfit_buf + offset;
  1077. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1078. memdev->header.length = sizeof(*memdev);
  1079. memdev->device_handle = handle[4];
  1080. memdev->physical_id = 4;
  1081. memdev->region_id = 0;
  1082. memdev->range_index = 10+1;
  1083. memdev->region_index = 8+1;
  1084. memdev->region_size = 0;
  1085. memdev->region_offset = 0;
  1086. memdev->address = 0;
  1087. memdev->interleave_index = 0;
  1088. memdev->interleave_ways = 1;
  1089. /* mem-region15 (spa0, dimm4) */
  1090. memdev = nfit_buf + offset +
  1091. sizeof(struct acpi_nfit_memory_map);
  1092. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1093. memdev->header.length = sizeof(*memdev);
  1094. memdev->device_handle = handle[4];
  1095. memdev->physical_id = 4;
  1096. memdev->region_id = 0;
  1097. memdev->range_index = 11+1;
  1098. memdev->region_index = 9+1;
  1099. memdev->region_size = SPA0_SIZE;
  1100. memdev->region_offset = t->spa_set_dma[2];
  1101. memdev->address = 0;
  1102. memdev->interleave_index = 0;
  1103. memdev->interleave_ways = 1;
  1104. /* mem-region16 (spa/bdw4, dimm4) */
  1105. memdev = nfit_buf + offset +
  1106. sizeof(struct acpi_nfit_memory_map) * 2;
  1107. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1108. memdev->header.length = sizeof(*memdev);
  1109. memdev->device_handle = handle[4];
  1110. memdev->physical_id = 4;
  1111. memdev->region_id = 0;
  1112. memdev->range_index = 12+1;
  1113. memdev->region_index = 8+1;
  1114. memdev->region_size = 0;
  1115. memdev->region_offset = 0;
  1116. memdev->address = 0;
  1117. memdev->interleave_index = 0;
  1118. memdev->interleave_ways = 1;
  1119. offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
  1120. /* flush4 (dimm4) */
  1121. flush = nfit_buf + offset;
  1122. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1123. flush->header.length = sizeof(struct acpi_nfit_flush_address);
  1124. flush->device_handle = handle[4];
  1125. flush->hint_count = 1;
  1126. flush->hint_address[0] = t->flush_dma[4];
  1127. }
  1128. post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);
  1129. acpi_desc = &t->acpi_desc;
  1130. set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
  1131. set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
  1132. set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
  1133. set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
  1134. set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
  1135. set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
  1136. set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_dsm_force_en);
  1137. }
  1138. static void nfit_test1_setup(struct nfit_test *t)
  1139. {
  1140. size_t offset;
  1141. void *nfit_buf = t->nfit_buf;
  1142. struct acpi_nfit_memory_map *memdev;
  1143. struct acpi_nfit_control_region *dcr;
  1144. struct acpi_nfit_system_address *spa;
  1145. struct acpi_nfit_desc *acpi_desc;
  1146. offset = 0;
  1147. /* spa0 (flat range with no bdw aliasing) */
  1148. spa = nfit_buf + offset;
  1149. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1150. spa->header.length = sizeof(*spa);
  1151. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  1152. spa->range_index = 0+1;
  1153. spa->address = t->spa_set_dma[0];
  1154. spa->length = SPA2_SIZE;
  1155. offset += sizeof(*spa);
  1156. /* mem-region0 (spa0, dimm0) */
  1157. memdev = nfit_buf + offset;
  1158. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1159. memdev->header.length = sizeof(*memdev);
  1160. memdev->device_handle = 0;
  1161. memdev->physical_id = 0;
  1162. memdev->region_id = 0;
  1163. memdev->range_index = 0+1;
  1164. memdev->region_index = 0+1;
  1165. memdev->region_size = SPA2_SIZE;
  1166. memdev->region_offset = 0;
  1167. memdev->address = 0;
  1168. memdev->interleave_index = 0;
  1169. memdev->interleave_ways = 1;
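/*
 * The flags below mark this dimm as having failed save/restore/flush,
 * as having a health event observed, and as not armed, giving the bus1
 * configuration coverage of the nfit health-flag paths that bus0's
 * healthy dimms do not exercise.
 */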
  1170. memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
  1171. | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
  1172. | ACPI_NFIT_MEM_NOT_ARMED;
  1173. offset += sizeof(*memdev);
  1174. /* dcr-descriptor0 */
  1175. dcr = nfit_buf + offset;
  1176. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1177. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1178. window_size);
  1179. dcr->region_index = 0+1;
  1180. dcr->vendor_id = 0xabcd;
  1181. dcr->device_id = 0;
  1182. dcr->revision_id = 1;
  1183. dcr->serial_number = ~0;
  1184. dcr->code = NFIT_FIC_BYTE;
  1185. dcr->windows = 0;
  1186. post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);
  1187. acpi_desc = &t->acpi_desc;
  1188. set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_dsm_force_en);
  1189. set_bit(ND_CMD_ARS_START, &acpi_desc->bus_dsm_force_en);
  1190. set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_dsm_force_en);
  1191. set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_dsm_force_en);
  1192. }
  1193. static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
  1194. void *iobuf, u64 len, int rw)
  1195. {
  1196. struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
  1197. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
  1198. struct nd_region *nd_region = &ndbr->nd_region;
  1199. unsigned int lane;
  1200. lane = nd_region_acquire_lane(nd_region);
  1201. if (rw)
  1202. memcpy(mmio->addr.base + dpa, iobuf, len);
  1203. else {
  1204. memcpy(iobuf, mmio->addr.base + dpa, len);
  1205. /* give us some coverage of the mmio_flush_range() API */
  1206. mmio_flush_range(mmio->addr.base + dpa, len);
  1207. }
  1208. nd_region_release_lane(nd_region, lane);
  1209. return 0;
  1210. }
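/*
 * nfit_test_blk_do_io() stands in for real block-window I/O: instead of
 * programming command/status registers it memcpy()s directly at the
 * block-window mapping (which this harness backs with ordinary memory),
 * while still acquiring and releasing a region lane like a real driver
 * and touching mmio_flush_range() on the read path for API coverage.
 */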
  1211. static int nfit_test_probe(struct platform_device *pdev)
  1212. {
  1213. struct nvdimm_bus_descriptor *nd_desc;
  1214. struct acpi_nfit_desc *acpi_desc;
  1215. struct device *dev = &pdev->dev;
  1216. struct nfit_test *nfit_test;
  1217. int rc;
  1218. nfit_test = to_nfit_test(&pdev->dev);
  1219. /* common alloc */
  1220. if (nfit_test->num_dcr) {
  1221. int num = nfit_test->num_dcr;
  1222. nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
  1223. GFP_KERNEL);
  1224. nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
  1225. GFP_KERNEL);
  1226. nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
  1227. GFP_KERNEL);
  1228. nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
  1229. GFP_KERNEL);
  1230. nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
  1231. GFP_KERNEL);
  1232. nfit_test->label_dma = devm_kcalloc(dev, num,
  1233. sizeof(dma_addr_t), GFP_KERNEL);
  1234. nfit_test->dcr = devm_kcalloc(dev, num,
  1235. sizeof(struct nfit_test_dcr *), GFP_KERNEL);
  1236. nfit_test->dcr_dma = devm_kcalloc(dev, num,
  1237. sizeof(dma_addr_t), GFP_KERNEL);
  1238. if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
  1239. && nfit_test->label_dma && nfit_test->dcr
  1240. && nfit_test->dcr_dma && nfit_test->flush
  1241. && nfit_test->flush_dma)
  1242. /* pass */;
  1243. else
  1244. return -ENOMEM;
  1245. }
  1246. if (nfit_test->num_pm) {
  1247. int num = nfit_test->num_pm;
  1248. nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
  1249. GFP_KERNEL);
  1250. nfit_test->spa_set_dma = devm_kcalloc(dev, num,
  1251. sizeof(dma_addr_t), GFP_KERNEL);
  1252. if (nfit_test->spa_set && nfit_test->spa_set_dma)
  1253. /* pass */;
  1254. else
  1255. return -ENOMEM;
  1256. }
  1257. /* per-nfit specific alloc */
  1258. if (nfit_test->alloc(nfit_test))
  1259. return -ENOMEM;
  1260. nfit_test->setup(nfit_test);
  1261. acpi_desc = &nfit_test->acpi_desc;
  1262. acpi_nfit_desc_init(acpi_desc, &pdev->dev);
  1263. acpi_desc->nfit = nfit_test->nfit_buf;
  1264. acpi_desc->blk_do_io = nfit_test_blk_do_io;
  1265. nd_desc = &acpi_desc->nd_desc;
  1266. nd_desc->provider_name = NULL;
  1267. nd_desc->ndctl = nfit_test_ctl;
  1268. acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
  1269. if (!acpi_desc->nvdimm_bus)
  1270. return -ENXIO;
  1271. rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
  1272. if (rc) {
  1273. nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
  1274. return rc;
  1275. }
  1276. if (nfit_test->setup != nfit_test0_setup)
  1277. return 0;
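/*
 * For instance 0 only, re-run setup with setup_hotplug set and parse
 * the nfit buffer a second time so the hotplug dimm (handle[4])
 * entries appended by nfit_test0_setup() are registered as well.
 */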
  1278. nfit_test->setup_hotplug = 1;
  1279. nfit_test->setup(nfit_test);
  1280. rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
  1281. if (rc) {
  1282. nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
  1283. return rc;
  1284. }
  1285. return 0;
  1286. }
  1287. static int nfit_test_remove(struct platform_device *pdev)
  1288. {
  1289. struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
  1290. struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;
  1291. nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
  1292. return 0;
  1293. }
  1294. static void nfit_test_release(struct device *dev)
  1295. {
  1296. struct nfit_test *nfit_test = to_nfit_test(dev);
  1297. kfree(nfit_test);
  1298. }
  1299. static const struct platform_device_id nfit_test_id[] = {
  1300. { KBUILD_MODNAME },
  1301. { },
  1302. };
  1303. static struct platform_driver nfit_test_driver = {
  1304. .probe = nfit_test_probe,
  1305. .remove = nfit_test_remove,
  1306. .driver = {
  1307. .name = KBUILD_MODNAME,
  1308. },
  1309. .id_table = nfit_test_id,
  1310. };
  1311. #ifdef CONFIG_CMA_SIZE_MBYTES
  1312. #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
  1313. #else
  1314. #define CMA_SIZE_MBYTES 0
  1315. #endif
  1316. static __init int nfit_test_init(void)
  1317. {
  1318. int rc, i;
  1319. nfit_test_setup(nfit_test_lookup);
  1320. for (i = 0; i < NUM_NFITS; i++) {
  1321. struct nfit_test *nfit_test;
  1322. struct platform_device *pdev;
  1323. static int once;
  1324. nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
  1325. if (!nfit_test) {
  1326. rc = -ENOMEM;
  1327. goto err_register;
  1328. }
  1329. INIT_LIST_HEAD(&nfit_test->resources);
  1330. switch (i) {
  1331. case 0:
  1332. nfit_test->num_pm = NUM_PM;
  1333. nfit_test->num_dcr = NUM_DCR;
  1334. nfit_test->alloc = nfit_test0_alloc;
  1335. nfit_test->setup = nfit_test0_setup;
  1336. break;
  1337. case 1:
  1338. nfit_test->num_pm = 1;
  1339. nfit_test->alloc = nfit_test1_alloc;
  1340. nfit_test->setup = nfit_test1_setup;
  1341. break;
  1342. default:
  1343. rc = -EINVAL;
  1344. goto err_register;
  1345. }
  1346. pdev = &nfit_test->pdev;
  1347. pdev->name = KBUILD_MODNAME;
  1348. pdev->id = i;
  1349. pdev->dev.release = nfit_test_release;
  1350. rc = platform_device_register(pdev);
  1351. if (rc) {
  1352. put_device(&pdev->dev);
  1353. goto err_register;
  1354. }
  1355. rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  1356. if (rc)
  1357. goto err_register;
  1358. instances[i] = nfit_test;
  1359. if (!once++) {
  1360. dma_addr_t dma;
  1361. void *buf;
  1362. buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
  1363. GFP_KERNEL);
  1364. if (!buf) {
  1365. rc = -ENOMEM;
  1366. dev_warn(&pdev->dev, "need 128M of free cma\n");
  1367. goto err_register;
  1368. }
  1369. dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
  1370. }
  1371. }
  1372. rc = platform_driver_register(&nfit_test_driver);
  1373. if (rc)
  1374. goto err_register;
  1375. return 0;
  1376. err_register:
  1377. for (i = 0; i < NUM_NFITS; i++)
  1378. if (instances[i])
  1379. platform_device_unregister(&instances[i]->pdev);
  1380. nfit_test_teardown();
  1381. return rc;
  1382. }
  1383. static __exit void nfit_test_exit(void)
  1384. {
  1385. int i;
  1386. platform_driver_unregister(&nfit_test_driver);
  1387. for (i = 0; i < NUM_NFITS; i++)
  1388. platform_device_unregister(&instances[i]->pdev);
  1389. nfit_test_teardown();
  1390. }
  1391. module_init(nfit_test_init);
  1392. module_exit(nfit_test_exit);
  1393. MODULE_LICENSE("GPL v2");
  1394. MODULE_AUTHOR("Intel Corporation");