/* nfit.c — NFIT test fixture (listing header and line-number runs from the original page removed) */
  1. /*
  2. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/platform_device.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/workqueue.h>
  17. #include <linux/libnvdimm.h>
  18. #include <linux/genalloc.h>
  19. #include <linux/vmalloc.h>
  20. #include <linux/device.h>
  21. #include <linux/module.h>
  22. #include <linux/mutex.h>
  23. #include <linux/ndctl.h>
  24. #include <linux/sizes.h>
  25. #include <linux/list.h>
  26. #include <linux/slab.h>
  27. #include <nd-core.h>
  28. #include <intel.h>
  29. #include <nfit.h>
  30. #include <nd.h>
  31. #include "nfit_test.h"
  32. #include "../watermark.h"
  33. #include <asm/mcsafe_test.h>
  34. /*
  35. * Generate an NFIT table to describe the following topology:
  36. *
  37. * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
  38. *
  39. * (a) (b) DIMM BLK-REGION
  40. * +----------+--------------+----------+---------+
  41. * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2
  42. * | imc0 +--+- - - - - region0 - - - -+----------+ +
  43. * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3
  44. * | +----------+--------------v----------v v
  45. * +--+---+ | |
  46. * | cpu0 | region1
  47. * +--+---+ | |
  48. * | +-------------------------^----------^ ^
  49. * +--+---+ | blk4.0 | pm1.0 | 2 region4
  50. * | imc1 +--+-------------------------+----------+ +
  51. * +------+ | blk5.0 | pm1.0 | 3 region5
  52. * +-------------------------+----------+-+-------+
  53. *
  54. * +--+---+
  55. * | cpu1 |
  56. * +--+---+ (Hotplug DIMM)
  57. * | +----------------------------------------------+
  58. * +--+---+ | blk6.0/pm7.0 | 4 region6/7
  59. * | imc0 +--+----------------------------------------------+
  60. * +------+
  61. *
  62. *
  63. * *) In this layout we have four dimms and two memory controllers in one
  64. * socket. Each unique interface (BLK or PMEM) to DPA space
  65. * is identified by a region device with a dynamically assigned id.
  66. *
  67. * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
  68. * A single PMEM namespace "pm0.0" is created using half of the
  69. * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespace
  70. * allocate from the bottom of a region. The unallocated
  71. * portion of REGION0 aliases with REGION2 and REGION3. That
  72. * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
  73. * "blk3.0") starting at the base of each DIMM to offset (a) in those
  74. * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
  75. * names that can be assigned to a namespace.
  76. *
  77. * *) In the last portion of dimm0 and dimm1 we have an interleaved
  78. * SPA range, REGION1, that spans those two dimms as well as dimm2
  79. * and dimm3. Some of REGION1 allocated to a PMEM namespace named
  80. * "pm1.0" the rest is reclaimed in 4 BLK namespaces (for each
  81. * dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
  82. * "blk5.0".
  83. *
  84. * *) The portion of dimm2 and dimm3 that do not participate in the
  85. * REGION1 interleaved SPA range (i.e. the DPA address below offset
  86. * (b) are also included in the "blk4.0" and "blk5.0" namespaces.
  87. * Note, that BLK namespaces need not be contiguous in DPA-space, and
  88. * can consume aliased capacity from multiple interleave sets.
  89. *
  90. * BUS1: Legacy NVDIMM (single contiguous range)
  91. *
  92. * region2
  93. * +---------------------+
  94. * |---------------------|
  95. * || pm2.0 ||
  96. * |---------------------|
  97. * +---------------------+
  98. *
  99. * *) A NFIT-table may describe a simple system-physical-address range
  100. * with no BLK aliasing. This type of region may optionally
  101. * reference an NVDIMM.
  102. */
/* Geometry and resource counts for the emulated topology diagrammed above. */
enum {
	NUM_PM = 3,		/* PMEM-only SPA ranges */
	NUM_DCR = 5,		/* DIMM control regions */
	NUM_HINTS = 8,		/* flush hint addresses — used later in the file; confirm per-dimm count there */
	NUM_BDW = NUM_DCR,	/* one block data window per control region */
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */
		+ 4 /* spa1 iset */ + 1 /* spa11 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,	/* emulated label area per dimm */
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,	/* 16K block-data-window aperture */
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};
/*
 * Backing store for an emulated DIMM control region: a block-data-window
 * command address / status pair followed by the data aperture itself.
 */
struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];	/* sic: historical misspelling of "aperture" kept as-is */
};
  126. #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
  127. (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
  128. | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
/* NFIT device handles for each emulated DIMM (see NFIT_DIMM_HANDLE). */
static u32 handle[] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
	[5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
	[6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
};
/* Per-dimm fault-injection knobs (presumably toggled elsewhere in the file
 * to force command failures for error-path testing — confirm at the users). */
static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
/* Default SMART health payload reported for every emulated DIMM. */
static const struct nd_intel_smart smart_def = {
	.flags = ND_INTEL_SMART_HEALTH_VALID
		| ND_INTEL_SMART_SPARES_VALID
		| ND_INTEL_SMART_ALARM_VALID
		| ND_INTEL_SMART_USED_VALID
		| ND_INTEL_SMART_SHUTDOWN_VALID
		| ND_INTEL_SMART_SHUTDOWN_COUNT_VALID
		| ND_INTEL_SMART_MTEMP_VALID
		| ND_INTEL_SMART_CTEMP_VALID,
	.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
	/* NOTE(review): the "* 16" factors suggest temperatures are in
	 * 1/16th degree units per the Intel DSM spec — confirm there. */
	.media_temperature = 23 * 16,
	.ctrl_temperature = 25 * 16,
	.pmic_temperature = 40 * 16,
	.spares = 75,
	.alarm_flags = ND_INTEL_SMART_SPARE_TRIP
		| ND_INTEL_SMART_TEMP_TRIP,
	.ait_status = 1,
	.life_used = 5,
	.shutdown_state = 0,
	.shutdown_count = 42,
	.vendor_size = 0,
};
/* Tracks the state of one emulated firmware-update sequence. */
struct nfit_test_fw {
	enum intel_fw_update_state state;
	u32 context;		/* token issued by start_update, matched by later calls */
	u64 version;		/* version reported back after a completed update */
	u32 size_received;	/* bytes accepted so far via send_data */
	u64 end_time;		/* jiffies when the fake verify phase completes */
};
/*
 * Per-platform-device test fixture: holds the constructed NFIT table and
 * all the DMA-coherent backing buffers the emulated bus hands out.
 */
struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;
	void *nfit_buf;		/* constructed NFIT table */
	dma_addr_t nfit_dma;
	size_t nfit_size;	/* allocated size of nfit_buf */
	size_t nfit_filled;	/* bytes of nfit_buf actually populated */
	int dcr_idx;
	int num_dcr;
	int num_pm;
	void **dimm;
	dma_addr_t *dimm_dma;
	void **flush;
	dma_addr_t *flush_dma;
	void **label;		/* per-dimm LABEL_SIZE label areas */
	dma_addr_t *label_dma;
	void **spa_set;
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);	/* bus-variant buffer allocation */
	void (*setup)(struct nfit_test *t);	/* bus-variant NFIT construction */
	int setup_hotplug;
	union acpi_object **_fit;
	dma_addr_t _fit_dma;
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;	/* canned scrub results */
		unsigned long deadline;	/* jiffies until the fake scrub "completes" */
		spinlock_t lock;
	} ars_state;
	struct device *dimm_dev[ARRAY_SIZE(handle)];
	struct nd_intel_smart *smart;
	struct nd_intel_smart_threshold *smart_threshold;
	struct badrange badrange;
	struct work_struct work;
	struct nfit_test_fw *fw;	/* per-dimm firmware-update state */
};
/* NOTE(review): users of these are outside this chunk — nfit_wq presumably
 * services deferred test work (t->work), nfit_pool presumably carves fake
 * SPA ranges; confirm at their initialization sites. */
static struct workqueue_struct *nfit_wq;
static struct gen_pool *nfit_pool;
  209. static struct nfit_test *to_nfit_test(struct device *dev)
  210. {
  211. struct platform_device *pdev = to_platform_device(dev);
  212. return container_of(pdev, struct nfit_test, pdev);
  213. }
/*
 * ND_INTEL_FW_GET_INFO: report the emulated firmware storage geometry and
 * the version recorded by the last completed update.  Returns 0 on
 * success, -EINVAL if the output buffer is undersized.
 */
static int nd_intel_test_get_fw_info(struct nfit_test *t,
		struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	/* canned capability values from the INTEL_FW_* test constants */
	nd_cmd->status = 0;
	nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE;
	nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN;
	nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL;
	nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME;
	nd_cmd->update_cap = 0;
	nd_cmd->fis_version = INTEL_FW_FIS_VERSION;
	nd_cmd->run_version = 0;
	nd_cmd->updated_version = fw->version;

	return 0;
}
/*
 * ND_INTEL_FW_START_UPDATE: begin a firmware update sequence.  Issues a
 * fresh context token that subsequent send/finish/query calls must echo.
 * A sequence already in flight is reported via extended status 0x10007.
 */
static int nd_intel_test_start_update(struct nfit_test *t,
		struct nd_intel_fw_start *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (fw->state != FW_STATE_NEW) {
		/* extended status, FW update in progress */
		nd_cmd->status = 0x10007;
		return 0;
	}

	fw->state = FW_STATE_IN_PROGRESS;
	fw->context++;		/* new token invalidates any stale callers */
	fw->size_received = 0;
	nd_cmd->status = 0;
	nd_cmd->context = fw->context;

	dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context);

	return 0;
}
/*
 * ND_INTEL_FW_SEND_DATA: accept one chunk of the firmware image.  The
 * command status word lives immediately after the variable-length data
 * payload.  Errors in the protocol (wrong state, stale context, bounds
 * violation) are reported via *status with a 0 return; -EINVAL is
 * reserved for an undersized buffer.
 */
static int nd_intel_test_send_data(struct nfit_test *t,
		struct nd_intel_fw_send_data *nd_cmd, unsigned int buf_len,
		int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];
	/* status trailer sits at data[length]; NOTE(review): nd_cmd->length is
	 * dereferenced here and in the debug prints below before it is
	 * validated against buf_len — acceptable for a trusted test harness,
	 * but verify callers always size the buffer to cover the trailer. */
	u32 *status = (u32 *)&nd_cmd->data[nd_cmd->length];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	dev_dbg(dev, "%s: cmd->status: %#x\n", __func__, *status);
	dev_dbg(dev, "%s: cmd->data[0]: %#x\n", __func__, nd_cmd->data[0]);
	dev_dbg(dev, "%s: cmd->data[%u]: %#x\n", __func__, nd_cmd->length-1,
			nd_cmd->data[nd_cmd->length-1]);

	if (fw->state != FW_STATE_IN_PROGRESS) {
		dev_dbg(dev, "%s: not in IN_PROGRESS state\n", __func__);
		*status = 0x5;
		return 0;
	}

	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		*status = 0x10007;
		return 0;
	}

	/*
	 * check offset + len > size of fw storage
	 * check length is > max send length
	 */
	if (nd_cmd->offset + nd_cmd->length > INTEL_FW_STORAGE_SIZE ||
			nd_cmd->length > INTEL_FW_MAX_SEND_LEN) {
		*status = 0x3;
		dev_dbg(dev, "%s: buffer boundary violation\n", __func__);
		return 0;
	}

	fw->size_received += nd_cmd->length;
	dev_dbg(dev, "%s: copying %u bytes, %u bytes so far\n",
			__func__, nd_cmd->length, fw->size_received);

	*status = 0;
	return 0;
}
  300. static int nd_intel_test_finish_fw(struct nfit_test *t,
  301. struct nd_intel_fw_finish_update *nd_cmd,
  302. unsigned int buf_len, int idx)
  303. {
  304. struct device *dev = &t->pdev.dev;
  305. struct nfit_test_fw *fw = &t->fw[idx];
  306. dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
  307. __func__, t, nd_cmd, buf_len, idx);
  308. if (fw->state == FW_STATE_UPDATED) {
  309. /* update already done, need cold boot */
  310. nd_cmd->status = 0x20007;
  311. return 0;
  312. }
  313. dev_dbg(dev, "%s: context: %#x ctrl_flags: %#x\n",
  314. __func__, nd_cmd->context, nd_cmd->ctrl_flags);
  315. switch (nd_cmd->ctrl_flags) {
  316. case 0: /* finish */
  317. if (nd_cmd->context != fw->context) {
  318. dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
  319. __func__, nd_cmd->context,
  320. fw->context);
  321. nd_cmd->status = 0x10007;
  322. return 0;
  323. }
  324. nd_cmd->status = 0;
  325. fw->state = FW_STATE_VERIFY;
  326. /* set 1 second of time for firmware "update" */
  327. fw->end_time = jiffies + HZ;
  328. break;
  329. case 1: /* abort */
  330. fw->size_received = 0;
  331. /* successfully aborted status */
  332. nd_cmd->status = 0x40007;
  333. fw->state = FW_STATE_NEW;
  334. dev_dbg(dev, "%s: abort successful\n", __func__);
  335. break;
  336. default: /* bad control flag */
  337. dev_warn(dev, "%s: unknown control flag: %#x\n",
  338. __func__, nd_cmd->ctrl_flags);
  339. return -EINVAL;
  340. }
  341. return 0;
  342. }
/*
 * ND_INTEL_FW_FINISH_QUERY: poll the outcome of a finish operation.
 * While the fake verify window is open the caller sees status 0x20007;
 * once it elapses the state transitions to UPDATED and the bogus test
 * version is reported as the new firmware revision.
 */
static int nd_intel_test_finish_query(struct nfit_test *t,
		struct nd_intel_fw_finish_query *nd_cmd,
		unsigned int buf_len, int idx)
{
	struct device *dev = &t->pdev.dev;
	struct nfit_test_fw *fw = &t->fw[idx];

	dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
			__func__, t, nd_cmd, buf_len, idx);

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	if (nd_cmd->context != fw->context) {
		dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
				__func__, nd_cmd->context, fw->context);
		nd_cmd->status = 0x10007;
		return 0;
	}

	dev_dbg(dev, "%s context: %#x\n", __func__, nd_cmd->context);

	switch (fw->state) {
	case FW_STATE_NEW:
		/* no update has been started; nothing to report */
		nd_cmd->updated_fw_rev = 0;
		nd_cmd->status = 0;
		dev_dbg(dev, "%s: new state\n", __func__);
		break;

	case FW_STATE_IN_PROGRESS:
		/* sequencing error */
		nd_cmd->status = 0x40007;
		nd_cmd->updated_fw_rev = 0;
		dev_dbg(dev, "%s: sequence error\n", __func__);
		break;

	case FW_STATE_VERIFY:
		/* NOTE(review): end_time is set from jiffies (unsigned long)
		 * but compared as jiffies64 here — confirm this is benign on
		 * 32-bit builds. */
		if (time_is_after_jiffies64(fw->end_time)) {
			nd_cmd->updated_fw_rev = 0;
			nd_cmd->status = 0x20007;
			dev_dbg(dev, "%s: still verifying\n", __func__);
			break;
		}

		dev_dbg(dev, "%s: transition out verify\n", __func__);
		fw->state = FW_STATE_UPDATED;
		/* we are going to fall through if it's "done" */
		/* fallthrough */
	case FW_STATE_UPDATED:
		nd_cmd->status = 0;
		/* bogus test version */
		fw->version = nd_cmd->updated_fw_rev =
			INTEL_FW_FAKE_VERSION;
		dev_dbg(dev, "%s: updated\n", __func__);
		break;

	default: /* we should never get here */
		return -EINVAL;
	}

	return 0;
}
  394. static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
  395. unsigned int buf_len)
  396. {
  397. if (buf_len < sizeof(*nd_cmd))
  398. return -EINVAL;
  399. nd_cmd->status = 0;
  400. nd_cmd->config_size = LABEL_SIZE;
  401. nd_cmd->max_xfer = SZ_4K;
  402. return 0;
  403. }
/*
 * ND_CMD_GET_CONFIG_DATA: copy a window of the emulated label area into
 * the caller's buffer.  Reads are clamped to the label size; the return
 * value is the count of unused trailing bytes in the output buffer
 * (non-negative on success), or -EINVAL for bad offsets/sizes.
 */
static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
		*nd_cmd, unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
		return -EINVAL;

	nd_cmd->status = 0;
	/* clamp the read so it cannot run past the end of the label area */
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(nd_cmd->out_buf, label + offset, len);
	rc = buf_len - sizeof(*nd_cmd) - len;

	return rc;
}
/*
 * ND_CMD_SET_CONFIG_DATA: copy the caller's buffer into the emulated
 * label area.  A 4-byte status word trails the input payload.  Writes
 * are clamped to the label size; the return value is the count of
 * unaccounted trailing bytes, or -EINVAL for bad offsets/sizes.
 */
static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	/* +4 accounts for the status word that follows the payload */
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;

	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	/* clamp the write so it cannot run past the end of the label area */
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);

	return rc;
}
/* Alignment/granularity unit for clear-error operations (bytes). */
#define NFIT_TEST_CLEAR_ERR_UNIT	256

/*
 * ND_CMD_ARS_CAP: advertise address-range-scrub capabilities.  The
 * maximum output size is bounded so all records fit in one 4K buffer,
 * matching the ars_status allocation.
 */
static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
		unsigned int buf_len)
{
	int ars_recs;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;

	/* for testing, only store up to n records that fit within 4k */
	ars_recs = SZ_4K / sizeof(struct nd_ars_record);

	nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
		+ ars_recs * sizeof(struct nd_ars_record);
	/* extended status carries the supported scrub types */
	nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
	nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;

	return 0;
}
/*
 * Populate the canned ARS status response for [addr, addr + len) by
 * intersecting the badrange list with the requested range, and arm a
 * one-second "scrub in progress" window.  Called with ars_state->lock
 * held by the ars_start handler.
 *
 * NOTE(review): the record index i is not clamped to the capacity
 * advertised by nfit_test_cmd_ars_cap — verify the ars_status
 * allocation can hold the worst-case badrange list.
 */
static void post_ars_status(struct ars_state *ars_state,
		struct badrange *badrange, u64 addr, u64 len)
{
	struct nd_cmd_ars_status *ars_status;
	struct nd_ars_record *ars_record;
	struct badrange_entry *be;
	u64 end = addr + len - 1;
	int i = 0;

	ars_state->deadline = jiffies + 1*HZ;
	ars_status = ars_state->ars_status;
	ars_status->status = 0;
	ars_status->address = addr;
	ars_status->length = len;
	ars_status->type = ND_ARS_PERSISTENT;

	spin_lock(&badrange->lock);
	list_for_each_entry(be, &badrange->list, list) {
		u64 be_end = be->start + be->length - 1;
		u64 rstart, rend;

		/* skip entries outside the range */
		if (be_end < addr || be->start > end)
			continue;
		/* clip the entry to the queried window */
		rstart = (be->start < addr) ? addr : be->start;
		rend = (be_end < end) ? be_end : end;
		ars_record = &ars_status->records[i];
		ars_record->handle = 0;
		ars_record->err_address = rstart;
		ars_record->length = rend - rstart + 1;
		i++;
	}
	spin_unlock(&badrange->lock);
	ars_status->num_records = i;
	ars_status->out_length = sizeof(struct nd_cmd_ars_status)
		+ i * sizeof(struct nd_ars_record);
}
/*
 * ND_CMD_ARS_START: begin a scrub.  If the previous fake scrub window is
 * still open, report busy via *cmd_rc; otherwise (re)generate the canned
 * status payload for the requested range.
 */
static int nfit_test_cmd_ars_start(struct nfit_test *t,
		struct ars_state *ars_state,
		struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
		int *cmd_rc)
{
	if (buf_len < sizeof(*ars_start))
		return -EINVAL;

	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		ars_start->status = NFIT_ARS_START_BUSY;
		*cmd_rc = -EBUSY;
	} else {
		ars_start->status = 0;
		ars_start->scrub_time = 1;
		post_ars_status(ars_state, &t->badrange, ars_start->address,
				ars_start->length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);

	return 0;
}
/*
 * ND_CMD_ARS_STATUS: report scrub progress.  While the fake scrub window
 * is open, return a zeroed busy payload; afterwards copy out the canned
 * results generated by post_ars_status().
 */
static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
		struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
		int *cmd_rc)
{
	if (buf_len < ars_state->ars_status->out_length)
		return -EINVAL;

	spin_lock(&ars_state->lock);
	if (time_before(jiffies, ars_state->deadline)) {
		memset(ars_status, 0, buf_len);
		ars_status->status = NFIT_ARS_STATUS_BUSY;
		ars_status->out_length = sizeof(*ars_status);
		*cmd_rc = -EBUSY;
	} else {
		memcpy(ars_status, ars_state->ars_status,
				ars_state->ars_status->out_length);
		*cmd_rc = 0;
	}
	spin_unlock(&ars_state->lock);

	return 0;
}
/*
 * ND_CMD_CLEAR_ERROR: drop the matching entries from the badrange list.
 * Address and length must be aligned to NFIT_TEST_CLEAR_ERR_UNIT; the
 * emulation always reports the full requested length as cleared.
 */
static int nfit_test_cmd_clear_error(struct nfit_test *t,
		struct nd_cmd_clear_error *clear_err,
		unsigned int buf_len, int *cmd_rc)
{
	const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;

	if (buf_len < sizeof(*clear_err))
		return -EINVAL;

	/* enforce clear_err_unit alignment on both address and length */
	if ((clear_err->address & mask) || (clear_err->length & mask))
		return -EINVAL;

	badrange_forget(&t->badrange, clear_err->address, clear_err->length);
	clear_err->status = 0;
	clear_err->cleared = clear_err->length;
	*cmd_rc = 0;

	return 0;
}
/* Context for the device_for_each_child() SPA-to-region lookup below. */
struct region_search_spa {
	u64 addr;			/* system physical address to locate */
	struct nd_region *region;	/* result: the region containing addr */
};
  549. static int is_region_device(struct device *dev)
  550. {
  551. return !strncmp(dev->kobj.name, "region", 6);
  552. }
/*
 * device_for_each_child() callback: record the region whose address span
 * covers ctx->addr.  Returns 1 to stop iteration on a match, 0 to keep
 * searching.
 */
static int nfit_test_search_region_spa(struct device *dev, void *data)
{
	struct region_search_spa *ctx = data;
	struct nd_region *nd_region;
	resource_size_t ndr_end;

	if (!is_region_device(dev))
		return 0;

	nd_region = to_nd_region(dev);
	ndr_end = nd_region->ndr_start + nd_region->ndr_size;

	/* half-open interval check: [ndr_start, ndr_end) */
	if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
		ctx->region = nd_region;
		return 1;
	}

	return 0;
}
/*
 * Resolve a system physical address to a (dimm handle, dpa) pair by
 * walking the bus's region devices.  Returns 0 and fills spa->devices[0]
 * on success, -ENODEV when no region covers the address.
 */
static int nfit_test_search_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa)
{
	int ret;
	struct nd_region *nd_region = NULL;
	struct nvdimm *nvdimm = NULL;
	struct nd_mapping *nd_mapping = NULL;
	struct region_search_spa ctx = {
		.addr = spa->spa,
		.region = NULL,
	};
	u64 dpa;

	/* nonzero means the callback found a matching region */
	ret = device_for_each_child(&bus->dev, &ctx,
				nfit_test_search_region_spa);

	if (!ret)
		return -ENODEV;

	nd_region = ctx.region;

	dpa = ctx.addr - nd_region->ndr_start;

	/*
	 * last dimm is selected for test
	 */
	nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
	nvdimm = nd_mapping->nvdimm;

	spa->devices[0].nfit_device_handle = handle[nvdimm->id];
	spa->num_nvdimms = 1;
	spa->devices[0].dpa = dpa;

	return 0;
}
/*
 * Handle NFIT_CMD_TRANSLATE_SPA: translate a system physical address to
 * a dimm handle + dpa.  On lookup failure only the firmware status is
 * set; the command itself still succeeds.
 */
static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
		struct nd_cmd_translate_spa *spa, unsigned int buf_len)
{
	/* caller's buffer must hold at least the length it claims */
	if (buf_len < spa->translate_length)
		return -EINVAL;

	/* NOTE(review): status 2 presumably means "address not
	 * translatable" -- confirm against the translate-SPA DSM spec */
	if (nfit_test_search_spa(bus, spa) < 0 || !spa->num_nvdimms)
		spa->status = 2;

	return 0;
}
  605. static int nfit_test_cmd_smart(struct nd_intel_smart *smart, unsigned int buf_len,
  606. struct nd_intel_smart *smart_data)
  607. {
  608. if (buf_len < sizeof(*smart))
  609. return -EINVAL;
  610. memcpy(smart, smart_data, sizeof(*smart));
  611. return 0;
  612. }
  613. static int nfit_test_cmd_smart_threshold(
  614. struct nd_intel_smart_threshold *out,
  615. unsigned int buf_len,
  616. struct nd_intel_smart_threshold *smart_t)
  617. {
  618. if (buf_len < sizeof(*smart_t))
  619. return -EINVAL;
  620. memcpy(out, smart_t, sizeof(*smart_t));
  621. return 0;
  622. }
/*
 * Fire the ACPI 0x81 health-event notification on @dimm_dev when the
 * current SMART state violates any armed threshold, reports non-optimal
 * health, or records a dirty shutdown.
 */
static void smart_notify(struct device *bus_dev,
		struct device *dimm_dev, struct nd_intel_smart *smart,
		struct nd_intel_smart_threshold *thresh)
{
	dev_dbg(dimm_dev, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
			__func__, thresh->alarm_control, thresh->spares,
			smart->spares, thresh->media_temperature,
			smart->media_temperature, thresh->ctrl_temperature,
			smart->ctrl_temperature);
	/* spares trip low (<= threshold), temperature trips high (>=) */
	if (((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP)
				&& smart->spares
				<= thresh->spares)
			|| ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP)
				&& smart->media_temperature
				>= thresh->media_temperature)
			|| ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
				&& smart->ctrl_temperature
				>= thresh->ctrl_temperature)
			|| (smart->health != ND_INTEL_SMART_NON_CRITICAL_HEALTH)
			|| (smart->shutdown_state != 0)) {
		/* notify under the bus device lock */
		device_lock(bus_dev);
		__acpi_nvdimm_notify(dimm_dev, 0x81);
		device_unlock(bus_dev);
	}
}
/*
 * Update a dimm's SMART thresholds from @in and re-evaluate whether a
 * health notification should fire with the new settings.
 */
static int nfit_test_cmd_smart_set_threshold(
		struct nd_intel_smart_set_threshold *in,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	unsigned int size;

	/* presumably the trailing 4 bytes of @in are the output status
	 * word, not threshold data -- confirm against the struct layout */
	size = sizeof(*in) - 4;
	if (buf_len < size)
		return -EINVAL;
	memcpy(thresh->data, in, size);
	in->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}
/*
 * Mutate a dimm's SMART state under ND_INTEL_SMART_INJECT_* flag
 * control.  Disabling an injection restores the smart_def default for
 * that field; any change re-runs the threshold notification check.
 */
static int nfit_test_cmd_smart_inject(
		struct nd_intel_smart_inject *inj,
		unsigned int buf_len,
		struct nd_intel_smart_threshold *thresh,
		struct nd_intel_smart *smart,
		struct device *bus_dev, struct device *dimm_dev)
{
	if (buf_len != sizeof(*inj))
		return -EINVAL;

	/* media temperature injection */
	if (inj->flags & ND_INTEL_SMART_INJECT_MTEMP) {
		if (inj->mtemp_enable)
			smart->media_temperature = inj->media_temperature;
		else
			smart->media_temperature = smart_def.media_temperature;
	}
	/* spare capacity injection */
	if (inj->flags & ND_INTEL_SMART_INJECT_SPARE) {
		if (inj->spare_enable)
			smart->spares = inj->spares;
		else
			smart->spares = smart_def.spares;
	}
	/* fatal health injection */
	if (inj->flags & ND_INTEL_SMART_INJECT_FATAL) {
		if (inj->fatal_enable)
			smart->health = ND_INTEL_SMART_FATAL_HEALTH;
		else
			smart->health = ND_INTEL_SMART_NON_CRITICAL_HEALTH;
	}
	/* unsafe shutdown injection; arming bumps the shutdown count */
	if (inj->flags & ND_INTEL_SMART_INJECT_SHUTDOWN) {
		if (inj->unsafe_shutdown_enable) {
			smart->shutdown_state = 1;
			smart->shutdown_count++;
		} else
			smart->shutdown_state = 0;
	}
	inj->status = 0;
	smart_notify(bus_dev, dimm_dev, smart, thresh);

	return 0;
}
/* Deferred work: deliver an uncorrectable-memory-error NFIT notification. */
static void uc_error_notify(struct work_struct *work)
{
	struct nfit_test *t = container_of(work, typeof(*t), work);

	__acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
}
  707. static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
  708. struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
  709. {
  710. int rc;
  711. if (buf_len != sizeof(*err_inj)) {
  712. rc = -EINVAL;
  713. goto err;
  714. }
  715. if (err_inj->err_inj_spa_range_length <= 0) {
  716. rc = -EINVAL;
  717. goto err;
  718. }
  719. rc = badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
  720. err_inj->err_inj_spa_range_length);
  721. if (rc < 0)
  722. goto err;
  723. if (err_inj->err_inj_options & (1 << ND_ARS_ERR_INJ_OPT_NOTIFY))
  724. queue_work(nfit_wq, &t->work);
  725. err_inj->status = 0;
  726. return 0;
  727. err:
  728. err_inj->status = NFIT_ARS_INJECT_INVALID;
  729. return rc;
  730. }
  731. static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t,
  732. struct nd_cmd_ars_err_inj_clr *err_clr, unsigned int buf_len)
  733. {
  734. int rc;
  735. if (buf_len != sizeof(*err_clr)) {
  736. rc = -EINVAL;
  737. goto err;
  738. }
  739. if (err_clr->err_inj_clr_spa_range_length <= 0) {
  740. rc = -EINVAL;
  741. goto err;
  742. }
  743. badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base,
  744. err_clr->err_inj_clr_spa_range_length);
  745. err_clr->status = 0;
  746. return 0;
  747. err:
  748. err_clr->status = NFIT_ARS_INJECT_INVALID;
  749. return rc;
  750. }
  751. static int nfit_test_cmd_ars_inject_status(struct nfit_test *t,
  752. struct nd_cmd_ars_err_inj_stat *err_stat,
  753. unsigned int buf_len)
  754. {
  755. struct badrange_entry *be;
  756. int max = SZ_4K / sizeof(struct nd_error_stat_query_record);
  757. int i = 0;
  758. err_stat->status = 0;
  759. spin_lock(&t->badrange.lock);
  760. list_for_each_entry(be, &t->badrange.list, list) {
  761. err_stat->record[i].err_inj_stat_spa_range_base = be->start;
  762. err_stat->record[i].err_inj_stat_spa_range_length = be->length;
  763. i++;
  764. if (i > max)
  765. break;
  766. }
  767. spin_unlock(&t->badrange.lock);
  768. err_stat->inj_err_rec_count = i;
  769. return 0;
  770. }
  771. static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
  772. struct nd_intel_lss *nd_cmd, unsigned int buf_len)
  773. {
  774. struct device *dev = &t->pdev.dev;
  775. if (buf_len < sizeof(*nd_cmd))
  776. return -EINVAL;
  777. switch (nd_cmd->enable) {
  778. case 0:
  779. nd_cmd->status = 0;
  780. dev_dbg(dev, "%s: Latch System Shutdown Status disabled\n",
  781. __func__);
  782. break;
  783. case 1:
  784. nd_cmd->status = 0;
  785. dev_dbg(dev, "%s: Latch System Shutdown Status enabled\n",
  786. __func__);
  787. break;
  788. default:
  789. dev_warn(dev, "Unknown enable value: %#x\n", nd_cmd->enable);
  790. nd_cmd->status = 0x3;
  791. break;
  792. }
  793. return 0;
  794. }
  795. static int override_return_code(int dimm, unsigned int func, int rc)
  796. {
  797. if ((1 << func) & dimm_fail_cmd_flags[dimm]) {
  798. if (dimm_fail_cmd_code[dimm])
  799. return dimm_fail_cmd_code[dimm];
  800. return -EIO;
  801. }
  802. return rc;
  803. }
  804. static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
  805. {
  806. int i;
  807. /* lookup per-dimm data */
  808. for (i = 0; i < ARRAY_SIZE(handle); i++)
  809. if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
  810. break;
  811. if (i >= ARRAY_SIZE(handle))
  812. return -ENXIO;
  813. return i;
  814. }
/*
 * Emulated bus ->ndctl() entry point, mirroring acpi_nfit_ctl().
 *
 * When @nvdimm is set the command targets a dimm, otherwise the bus.
 * ND_CMD_CALL packages are unwrapped to (func, payload) before
 * dispatch, and dimm results pass through override_return_code() so
 * tests can force failures.  Firmware status is returned via @cmd_rc.
 */
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	int i, rc = 0, __cmd_rc;

	/* tolerate callers that do not collect the firmware status */
	if (!cmd_rc)
		cmd_rc = &__cmd_rc;
	*cmd_rc = 0;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);

		if (!nfit_mem)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			struct nd_cmd_pkg *call_pkg = buf;

			/* dispatch on the wrapped command and payload */
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;
			func = call_pkg->nd_command;

			if (call_pkg->nd_family != nfit_mem->family)
				return -ENOTTY;

			i = get_dimm(nfit_mem, func);
			if (i < 0)
				return i;

			switch (func) {
			case ND_INTEL_ENABLE_LSS_STATUS:
				rc = nd_intel_test_cmd_set_lss_status(t,
						buf, buf_len);
				break;
			case ND_INTEL_FW_GET_INFO:
				rc = nd_intel_test_get_fw_info(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_FW_START_UPDATE:
				rc = nd_intel_test_start_update(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_FW_SEND_DATA:
				rc = nd_intel_test_send_data(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_FW_FINISH_UPDATE:
				rc = nd_intel_test_finish_fw(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_FW_FINISH_QUERY:
				rc = nd_intel_test_finish_query(t, buf,
						buf_len, i - t->dcr_idx);
				break;
			case ND_INTEL_SMART:
				rc = nfit_test_cmd_smart(buf, buf_len,
						&t->smart[i - t->dcr_idx]);
				break;
			case ND_INTEL_SMART_THRESHOLD:
				rc = nfit_test_cmd_smart_threshold(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx]);
				break;
			case ND_INTEL_SMART_SET_THRESHOLD:
				rc = nfit_test_cmd_smart_set_threshold(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
				break;
			case ND_INTEL_SMART_INJECT:
				rc = nfit_test_cmd_smart_inject(buf,
						buf_len,
						&t->smart_threshold[i -
							t->dcr_idx],
						&t->smart[i - t->dcr_idx],
						&t->pdev.dev, t->dimm_dev[i]);
				break;
			default:
				return -ENOTTY;
			}
			return override_return_code(i, func, rc);
		}

		/* non-packaged dimm command: both masks must allow it */
		if (!test_bit(cmd, &cmd_mask)
				|| !test_bit(func, &nfit_mem->dsm_mask))
			return -ENOTTY;

		i = get_dimm(nfit_mem, func);
		if (i < 0)
			return i;

		switch (func) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
				t->label[i - t->dcr_idx]);
			break;
		default:
			return -ENOTTY;
		}
		return override_return_code(i, func, rc);
	} else {
		struct ars_state *ars_state = &t->ars_state;
		struct nd_cmd_pkg *call_pkg = buf;

		if (!nd_desc)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			func = call_pkg->nd_command;
			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;

			/* bus-level vendor passthrough commands */
			switch (func) {
			case NFIT_CMD_TRANSLATE_SPA:
				rc = nfit_test_cmd_translate_spa(
					acpi_desc->nvdimm_bus, buf, buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_SET:
				rc = nfit_test_cmd_ars_error_inject(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_CLEAR:
				rc = nfit_test_cmd_ars_inject_clear(t, buf,
					buf_len);
				return rc;
			case NFIT_CMD_ARS_INJECT_GET:
				rc = nfit_test_cmd_ars_inject_status(t, buf,
					buf_len);
				return rc;
			default:
				return -ENOTTY;
			}
		}

		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
			return -ENOTTY;

		switch (func) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(t, ars_state, buf,
					buf_len, cmd_rc);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_CLEAR_ERROR:
			rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}
/* Protects every test instance's resources list (see nfit_test_lookup) */
static DEFINE_SPINLOCK(nfit_test_lock);
/* Registered test instances; unused slots are NULL */
static struct nfit_test *instances[NUM_NFITS];
/*
 * devm action: unlink a test resource, return pool-backed ranges to the
 * gen_pool, and free the vmalloc() backing buffer.
 */
static void release_nfit_res(void *data)
{
	struct nfit_test_resource *nfit_res = data;

	spin_lock(&nfit_test_lock);
	list_del(&nfit_res->list);
	spin_unlock(&nfit_test_lock);

	/* only >= DIMM_SIZE ranges were carved from nfit_pool */
	if (resource_size(&nfit_res->res) >= DIMM_SIZE)
		gen_pool_free(nfit_pool, nfit_res->res.start,
				resource_size(&nfit_res->res));
	vfree(nfit_res->buf);
	kfree(nfit_res);
}
  986. static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
  987. void *buf)
  988. {
  989. struct device *dev = &t->pdev.dev;
  990. struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
  991. GFP_KERNEL);
  992. int rc;
  993. if (!buf || !nfit_res || !*dma)
  994. goto err;
  995. rc = devm_add_action(dev, release_nfit_res, nfit_res);
  996. if (rc)
  997. goto err;
  998. INIT_LIST_HEAD(&nfit_res->list);
  999. memset(buf, 0, size);
  1000. nfit_res->dev = dev;
  1001. nfit_res->buf = buf;
  1002. nfit_res->res.start = *dma;
  1003. nfit_res->res.end = *dma + size - 1;
  1004. nfit_res->res.name = "NFIT";
  1005. spin_lock_init(&nfit_res->lock);
  1006. INIT_LIST_HEAD(&nfit_res->requests);
  1007. spin_lock(&nfit_test_lock);
  1008. list_add(&nfit_res->list, &t->resources);
  1009. spin_unlock(&nfit_test_lock);
  1010. return nfit_res->buf;
  1011. err:
  1012. if (*dma && size >= DIMM_SIZE)
  1013. gen_pool_free(nfit_pool, *dma, size);
  1014. if (buf)
  1015. vfree(buf);
  1016. kfree(nfit_res);
  1017. return NULL;
  1018. }
/*
 * Allocate @size bytes of emulated nvdimm backing store.  Allocations of
 * DIMM_SIZE or more get a 128M-aligned fake physical address carved from
 * nfit_pool; smaller ones reuse the vmalloc address as their dma address.
 */
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	struct genpool_data_align data = {
		.align = SZ_128M,
	};
	void *buf = vmalloc(size);

	if (size >= DIMM_SIZE)
		*dma = gen_pool_alloc_algo(nfit_pool, size,
				gen_pool_first_fit_align, &data);
	else
		*dma = (unsigned long) buf;
	/* __test_alloc() validates buf and *dma and cleans up on failure */
	return __test_alloc(t, size, dma, buf);
}
  1032. static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
  1033. {
  1034. int i;
  1035. for (i = 0; i < ARRAY_SIZE(instances); i++) {
  1036. struct nfit_test_resource *n, *nfit_res = NULL;
  1037. struct nfit_test *t = instances[i];
  1038. if (!t)
  1039. continue;
  1040. spin_lock(&nfit_test_lock);
  1041. list_for_each_entry(n, &t->resources, list) {
  1042. if (addr >= n->res.start && (addr < n->res.start
  1043. + resource_size(&n->res))) {
  1044. nfit_res = n;
  1045. break;
  1046. } else if (addr >= (unsigned long) n->buf
  1047. && (addr < (unsigned long) n->buf
  1048. + resource_size(&n->res))) {
  1049. nfit_res = n;
  1050. break;
  1051. }
  1052. }
  1053. spin_unlock(&nfit_test_lock);
  1054. if (nfit_res)
  1055. return nfit_res;
  1056. }
  1057. return NULL;
  1058. }
  1059. static int ars_state_init(struct device *dev, struct ars_state *ars_state)
  1060. {
  1061. /* for testing, only store up to n records that fit within 4k */
  1062. ars_state->ars_status = devm_kzalloc(dev,
  1063. sizeof(struct nd_cmd_ars_status) + SZ_4K, GFP_KERNEL);
  1064. if (!ars_state->ars_status)
  1065. return -ENOMEM;
  1066. spin_lock_init(&ars_state->lock);
  1067. return 0;
  1068. }
  1069. static void put_dimms(void *data)
  1070. {
  1071. struct nfit_test *t = data;
  1072. int i;
  1073. for (i = 0; i < t->num_dcr; i++)
  1074. if (t->dimm_dev[i])
  1075. device_unregister(t->dimm_dev[i]);
  1076. }
/* Class under which the per-dimm "test_dimmN" control devices live */
static struct class *nfit_test_dimm;
  1078. static int dimm_name_to_id(struct device *dev)
  1079. {
  1080. int dimm;
  1081. if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1)
  1082. return -ENXIO;
  1083. return dimm;
  1084. }
  1085. static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
  1086. char *buf)
  1087. {
  1088. int dimm = dimm_name_to_id(dev);
  1089. if (dimm < 0)
  1090. return dimm;
  1091. return sprintf(buf, "%#x\n", handle[dimm]);
  1092. }
  1093. DEVICE_ATTR_RO(handle);
  1094. static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
  1095. char *buf)
  1096. {
  1097. int dimm = dimm_name_to_id(dev);
  1098. if (dimm < 0)
  1099. return dimm;
  1100. return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
  1101. }
  1102. static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
  1103. const char *buf, size_t size)
  1104. {
  1105. int dimm = dimm_name_to_id(dev);
  1106. unsigned long val;
  1107. ssize_t rc;
  1108. if (dimm < 0)
  1109. return dimm;
  1110. rc = kstrtol(buf, 0, &val);
  1111. if (rc)
  1112. return rc;
  1113. dimm_fail_cmd_flags[dimm] = val;
  1114. return size;
  1115. }
  1116. static DEVICE_ATTR_RW(fail_cmd);
  1117. static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr,
  1118. char *buf)
  1119. {
  1120. int dimm = dimm_name_to_id(dev);
  1121. if (dimm < 0)
  1122. return dimm;
  1123. return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]);
  1124. }
  1125. static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr,
  1126. const char *buf, size_t size)
  1127. {
  1128. int dimm = dimm_name_to_id(dev);
  1129. unsigned long val;
  1130. ssize_t rc;
  1131. if (dimm < 0)
  1132. return dimm;
  1133. rc = kstrtol(buf, 0, &val);
  1134. if (rc)
  1135. return rc;
  1136. dimm_fail_cmd_code[dimm] = val;
  1137. return size;
  1138. }
  1139. static DEVICE_ATTR_RW(fail_cmd_code);
/* sysfs attributes exported on each "test_dimmN" control device */
static struct attribute *nfit_test_dimm_attributes[] = {
	&dev_attr_fail_cmd.attr,
	&dev_attr_fail_cmd_code.attr,
	&dev_attr_handle.attr,
	NULL,
};

static struct attribute_group nfit_test_dimm_attribute_group = {
	.attrs = nfit_test_dimm_attributes,
};

/* group list handed to device_create_with_groups() */
static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
	&nfit_test_dimm_attribute_group,
	NULL,
};
/*
 * Create the "test_dimmN" control devices used to steer fault injection.
 * put_dimms() is registered first so partially created devices are
 * cleaned up on any later failure.
 */
static int nfit_test_dimm_init(struct nfit_test *t)
{
	int i;

	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t))
		return -ENOMEM;
	for (i = 0; i < t->num_dcr; i++) {
		t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
				&t->pdev.dev, 0, NULL,
				nfit_test_dimm_attribute_groups,
				"test_dimm%d", i + t->dcr_idx);
		/* NOTE(review): device_create_with_groups() reports failure
		 * as ERR_PTR(), not NULL, so this check likely never fires;
		 * confirm and consider IS_ERR() */
		if (!t->dimm_dev[i])
			return -ENOMEM;
	}
	return 0;
}
  1168. static void smart_init(struct nfit_test *t)
  1169. {
  1170. int i;
  1171. const struct nd_intel_smart_threshold smart_t_data = {
  1172. .alarm_control = ND_INTEL_SMART_SPARE_TRIP
  1173. | ND_INTEL_SMART_TEMP_TRIP,
  1174. .media_temperature = 40 * 16,
  1175. .ctrl_temperature = 30 * 16,
  1176. .spares = 5,
  1177. };
  1178. for (i = 0; i < t->num_dcr; i++) {
  1179. memcpy(&t->smart[i], &smart_def, sizeof(smart_def));
  1180. memcpy(&t->smart_threshold[i], &smart_t_data,
  1181. sizeof(smart_t_data));
  1182. }
  1183. }
/*
 * Allocate all backing store for test instance 0: the NFIT table, three
 * SPA ranges, per-dimm dimm/label/flush/dcr buffers, the dynamic-FIT
 * slot, plus the per-dimm control devices and ARS state.  Everything is
 * devm-managed via test_alloc(), so error paths just return -ENOMEM.
 */
static int nfit_test0_alloc(struct nfit_test *t)
{
	/* worst-case NFIT size for all tables this instance publishes */
	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
			+ offsetof(struct acpi_nfit_control_region,
					window_size) * NUM_DCR
			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
			+ (sizeof(struct acpi_nfit_flush_address)
					+ sizeof(u64) * NUM_HINTS) * NUM_DCR
			+ sizeof(struct acpi_nfit_capabilities);
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
	if (!t->spa_set[2])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;

		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);

		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
					sizeof(u64) * NUM_HINTS),
				&t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}

	for (i = 0; i < t->num_dcr; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}

	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
	if (!t->_fit)
		return -ENOMEM;

	if (nfit_test_dimm_init(t))
		return -ENOMEM;
	smart_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}
/*
 * Allocate backing store for test instance 1: a smaller NFIT, one SPA
 * range, per-dimm labels, and a second SPA_VCD_SIZE range, plus the
 * control devices and ARS state.  All allocations are devm-managed.
 */
static int nfit_test1_alloc(struct nfit_test *t)
{
	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
		+ sizeof(struct acpi_nfit_memory_map) * 2
		+ offsetof(struct acpi_nfit_control_region, window_size) * 2;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	for (i = 0; i < t->num_dcr; i++) {
		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		sprintf(t->label[i], "label%d", i);
	}

	t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	if (nfit_test_dimm_init(t))
		return -ENOMEM;
	smart_init(t);
	return ars_state_init(&t->pdev.dev, &t->ars_state);
}
/* Fill the identification fields shared by all test control regions. */
static void dcr_common_init(struct acpi_nfit_control_region *dcr)
{
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->valid_fields = 1;
	dcr->manufacturing_location = 0xa;
	/* stored big-endian, hence cpu_to_be16() */
	dcr->manufacturing_date = cpu_to_be16(2016);
}
  1272. static void nfit_test0_setup(struct nfit_test *t)
  1273. {
  1274. const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
  1275. + (sizeof(u64) * NUM_HINTS);
  1276. struct acpi_nfit_desc *acpi_desc;
  1277. struct acpi_nfit_memory_map *memdev;
  1278. void *nfit_buf = t->nfit_buf;
  1279. struct acpi_nfit_system_address *spa;
  1280. struct acpi_nfit_control_region *dcr;
  1281. struct acpi_nfit_data_region *bdw;
  1282. struct acpi_nfit_flush_address *flush;
  1283. struct acpi_nfit_capabilities *pcap;
  1284. unsigned int offset = 0, i;
  1285. /*
  1286. * spa0 (interleave first half of dimm0 and dimm1, note storage
  1287. * does not actually alias the related block-data-window
  1288. * regions)
  1289. */
  1290. spa = nfit_buf;
  1291. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1292. spa->header.length = sizeof(*spa);
  1293. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  1294. spa->range_index = 0+1;
  1295. spa->address = t->spa_set_dma[0];
  1296. spa->length = SPA0_SIZE;
  1297. offset += spa->header.length;
  1298. /*
  1299. * spa1 (interleave last half of the 4 DIMMS, note storage
  1300. * does not actually alias the related block-data-window
  1301. * regions)
  1302. */
  1303. spa = nfit_buf + offset;
  1304. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1305. spa->header.length = sizeof(*spa);
  1306. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  1307. spa->range_index = 1+1;
  1308. spa->address = t->spa_set_dma[1];
  1309. spa->length = SPA1_SIZE;
  1310. offset += spa->header.length;
  1311. /* spa2 (dcr0) dimm0 */
  1312. spa = nfit_buf + offset;
  1313. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1314. spa->header.length = sizeof(*spa);
  1315. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1316. spa->range_index = 2+1;
  1317. spa->address = t->dcr_dma[0];
  1318. spa->length = DCR_SIZE;
  1319. offset += spa->header.length;
  1320. /* spa3 (dcr1) dimm1 */
  1321. spa = nfit_buf + offset;
  1322. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1323. spa->header.length = sizeof(*spa);
  1324. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1325. spa->range_index = 3+1;
  1326. spa->address = t->dcr_dma[1];
  1327. spa->length = DCR_SIZE;
  1328. offset += spa->header.length;
  1329. /* spa4 (dcr2) dimm2 */
  1330. spa = nfit_buf + offset;
  1331. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1332. spa->header.length = sizeof(*spa);
  1333. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1334. spa->range_index = 4+1;
  1335. spa->address = t->dcr_dma[2];
  1336. spa->length = DCR_SIZE;
  1337. offset += spa->header.length;
  1338. /* spa5 (dcr3) dimm3 */
  1339. spa = nfit_buf + offset;
  1340. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1341. spa->header.length = sizeof(*spa);
  1342. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1343. spa->range_index = 5+1;
  1344. spa->address = t->dcr_dma[3];
  1345. spa->length = DCR_SIZE;
  1346. offset += spa->header.length;
  1347. /* spa6 (bdw for dcr0) dimm0 */
  1348. spa = nfit_buf + offset;
  1349. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1350. spa->header.length = sizeof(*spa);
  1351. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1352. spa->range_index = 6+1;
  1353. spa->address = t->dimm_dma[0];
  1354. spa->length = DIMM_SIZE;
  1355. offset += spa->header.length;
  1356. /* spa7 (bdw for dcr1) dimm1 */
  1357. spa = nfit_buf + offset;
  1358. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1359. spa->header.length = sizeof(*spa);
  1360. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1361. spa->range_index = 7+1;
  1362. spa->address = t->dimm_dma[1];
  1363. spa->length = DIMM_SIZE;
  1364. offset += spa->header.length;
  1365. /* spa8 (bdw for dcr2) dimm2 */
  1366. spa = nfit_buf + offset;
  1367. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1368. spa->header.length = sizeof(*spa);
  1369. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1370. spa->range_index = 8+1;
  1371. spa->address = t->dimm_dma[2];
  1372. spa->length = DIMM_SIZE;
  1373. offset += spa->header.length;
  1374. /* spa9 (bdw for dcr3) dimm3 */
  1375. spa = nfit_buf + offset;
  1376. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1377. spa->header.length = sizeof(*spa);
  1378. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1379. spa->range_index = 9+1;
  1380. spa->address = t->dimm_dma[3];
  1381. spa->length = DIMM_SIZE;
  1382. offset += spa->header.length;
  1383. /* mem-region0 (spa0, dimm0) */
  1384. memdev = nfit_buf + offset;
  1385. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1386. memdev->header.length = sizeof(*memdev);
  1387. memdev->device_handle = handle[0];
  1388. memdev->physical_id = 0;
  1389. memdev->region_id = 0;
  1390. memdev->range_index = 0+1;
  1391. memdev->region_index = 4+1;
  1392. memdev->region_size = SPA0_SIZE/2;
  1393. memdev->region_offset = 1;
  1394. memdev->address = 0;
  1395. memdev->interleave_index = 0;
  1396. memdev->interleave_ways = 2;
  1397. offset += memdev->header.length;
  1398. /* mem-region1 (spa0, dimm1) */
  1399. memdev = nfit_buf + offset;
  1400. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1401. memdev->header.length = sizeof(*memdev);
  1402. memdev->device_handle = handle[1];
  1403. memdev->physical_id = 1;
  1404. memdev->region_id = 0;
  1405. memdev->range_index = 0+1;
  1406. memdev->region_index = 5+1;
  1407. memdev->region_size = SPA0_SIZE/2;
  1408. memdev->region_offset = (1 << 8);
  1409. memdev->address = 0;
  1410. memdev->interleave_index = 0;
  1411. memdev->interleave_ways = 2;
  1412. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1413. offset += memdev->header.length;
  1414. /* mem-region2 (spa1, dimm0) */
  1415. memdev = nfit_buf + offset;
  1416. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1417. memdev->header.length = sizeof(*memdev);
  1418. memdev->device_handle = handle[0];
  1419. memdev->physical_id = 0;
  1420. memdev->region_id = 1;
  1421. memdev->range_index = 1+1;
  1422. memdev->region_index = 4+1;
  1423. memdev->region_size = SPA1_SIZE/4;
  1424. memdev->region_offset = (1 << 16);
  1425. memdev->address = SPA0_SIZE/2;
  1426. memdev->interleave_index = 0;
  1427. memdev->interleave_ways = 4;
  1428. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1429. offset += memdev->header.length;
  1430. /* mem-region3 (spa1, dimm1) */
  1431. memdev = nfit_buf + offset;
  1432. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1433. memdev->header.length = sizeof(*memdev);
  1434. memdev->device_handle = handle[1];
  1435. memdev->physical_id = 1;
  1436. memdev->region_id = 1;
  1437. memdev->range_index = 1+1;
  1438. memdev->region_index = 5+1;
  1439. memdev->region_size = SPA1_SIZE/4;
  1440. memdev->region_offset = (1 << 24);
  1441. memdev->address = SPA0_SIZE/2;
  1442. memdev->interleave_index = 0;
  1443. memdev->interleave_ways = 4;
  1444. offset += memdev->header.length;
  1445. /* mem-region4 (spa1, dimm2) */
  1446. memdev = nfit_buf + offset;
  1447. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1448. memdev->header.length = sizeof(*memdev);
  1449. memdev->device_handle = handle[2];
  1450. memdev->physical_id = 2;
  1451. memdev->region_id = 0;
  1452. memdev->range_index = 1+1;
  1453. memdev->region_index = 6+1;
  1454. memdev->region_size = SPA1_SIZE/4;
  1455. memdev->region_offset = (1ULL << 32);
  1456. memdev->address = SPA0_SIZE/2;
  1457. memdev->interleave_index = 0;
  1458. memdev->interleave_ways = 4;
  1459. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1460. offset += memdev->header.length;
  1461. /* mem-region5 (spa1, dimm3) */
  1462. memdev = nfit_buf + offset;
  1463. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1464. memdev->header.length = sizeof(*memdev);
  1465. memdev->device_handle = handle[3];
  1466. memdev->physical_id = 3;
  1467. memdev->region_id = 0;
  1468. memdev->range_index = 1+1;
  1469. memdev->region_index = 7+1;
  1470. memdev->region_size = SPA1_SIZE/4;
  1471. memdev->region_offset = (1ULL << 40);
  1472. memdev->address = SPA0_SIZE/2;
  1473. memdev->interleave_index = 0;
  1474. memdev->interleave_ways = 4;
  1475. offset += memdev->header.length;
  1476. /* mem-region6 (spa/dcr0, dimm0) */
  1477. memdev = nfit_buf + offset;
  1478. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1479. memdev->header.length = sizeof(*memdev);
  1480. memdev->device_handle = handle[0];
  1481. memdev->physical_id = 0;
  1482. memdev->region_id = 0;
  1483. memdev->range_index = 2+1;
  1484. memdev->region_index = 0+1;
  1485. memdev->region_size = 0;
  1486. memdev->region_offset = 0;
  1487. memdev->address = 0;
  1488. memdev->interleave_index = 0;
  1489. memdev->interleave_ways = 1;
  1490. offset += memdev->header.length;
  1491. /* mem-region7 (spa/dcr1, dimm1) */
  1492. memdev = nfit_buf + offset;
  1493. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1494. memdev->header.length = sizeof(*memdev);
  1495. memdev->device_handle = handle[1];
  1496. memdev->physical_id = 1;
  1497. memdev->region_id = 0;
  1498. memdev->range_index = 3+1;
  1499. memdev->region_index = 1+1;
  1500. memdev->region_size = 0;
  1501. memdev->region_offset = 0;
  1502. memdev->address = 0;
  1503. memdev->interleave_index = 0;
  1504. memdev->interleave_ways = 1;
  1505. offset += memdev->header.length;
  1506. /* mem-region8 (spa/dcr2, dimm2) */
  1507. memdev = nfit_buf + offset;
  1508. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1509. memdev->header.length = sizeof(*memdev);
  1510. memdev->device_handle = handle[2];
  1511. memdev->physical_id = 2;
  1512. memdev->region_id = 0;
  1513. memdev->range_index = 4+1;
  1514. memdev->region_index = 2+1;
  1515. memdev->region_size = 0;
  1516. memdev->region_offset = 0;
  1517. memdev->address = 0;
  1518. memdev->interleave_index = 0;
  1519. memdev->interleave_ways = 1;
  1520. offset += memdev->header.length;
  1521. /* mem-region9 (spa/dcr3, dimm3) */
  1522. memdev = nfit_buf + offset;
  1523. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1524. memdev->header.length = sizeof(*memdev);
  1525. memdev->device_handle = handle[3];
  1526. memdev->physical_id = 3;
  1527. memdev->region_id = 0;
  1528. memdev->range_index = 5+1;
  1529. memdev->region_index = 3+1;
  1530. memdev->region_size = 0;
  1531. memdev->region_offset = 0;
  1532. memdev->address = 0;
  1533. memdev->interleave_index = 0;
  1534. memdev->interleave_ways = 1;
  1535. offset += memdev->header.length;
  1536. /* mem-region10 (spa/bdw0, dimm0) */
  1537. memdev = nfit_buf + offset;
  1538. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1539. memdev->header.length = sizeof(*memdev);
  1540. memdev->device_handle = handle[0];
  1541. memdev->physical_id = 0;
  1542. memdev->region_id = 0;
  1543. memdev->range_index = 6+1;
  1544. memdev->region_index = 0+1;
  1545. memdev->region_size = 0;
  1546. memdev->region_offset = 0;
  1547. memdev->address = 0;
  1548. memdev->interleave_index = 0;
  1549. memdev->interleave_ways = 1;
  1550. offset += memdev->header.length;
  1551. /* mem-region11 (spa/bdw1, dimm1) */
  1552. memdev = nfit_buf + offset;
  1553. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1554. memdev->header.length = sizeof(*memdev);
  1555. memdev->device_handle = handle[1];
  1556. memdev->physical_id = 1;
  1557. memdev->region_id = 0;
  1558. memdev->range_index = 7+1;
  1559. memdev->region_index = 1+1;
  1560. memdev->region_size = 0;
  1561. memdev->region_offset = 0;
  1562. memdev->address = 0;
  1563. memdev->interleave_index = 0;
  1564. memdev->interleave_ways = 1;
  1565. offset += memdev->header.length;
  1566. /* mem-region12 (spa/bdw2, dimm2) */
  1567. memdev = nfit_buf + offset;
  1568. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1569. memdev->header.length = sizeof(*memdev);
  1570. memdev->device_handle = handle[2];
  1571. memdev->physical_id = 2;
  1572. memdev->region_id = 0;
  1573. memdev->range_index = 8+1;
  1574. memdev->region_index = 2+1;
  1575. memdev->region_size = 0;
  1576. memdev->region_offset = 0;
  1577. memdev->address = 0;
  1578. memdev->interleave_index = 0;
  1579. memdev->interleave_ways = 1;
  1580. offset += memdev->header.length;
  1581. /* mem-region13 (spa/dcr3, dimm3) */
  1582. memdev = nfit_buf + offset;
  1583. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1584. memdev->header.length = sizeof(*memdev);
  1585. memdev->device_handle = handle[3];
  1586. memdev->physical_id = 3;
  1587. memdev->region_id = 0;
  1588. memdev->range_index = 9+1;
  1589. memdev->region_index = 3+1;
  1590. memdev->region_size = 0;
  1591. memdev->region_offset = 0;
  1592. memdev->address = 0;
  1593. memdev->interleave_index = 0;
  1594. memdev->interleave_ways = 1;
  1595. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1596. offset += memdev->header.length;
  1597. /* dcr-descriptor0: blk */
  1598. dcr = nfit_buf + offset;
  1599. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1600. dcr->header.length = sizeof(*dcr);
  1601. dcr->region_index = 0+1;
  1602. dcr_common_init(dcr);
  1603. dcr->serial_number = ~handle[0];
  1604. dcr->code = NFIT_FIC_BLK;
  1605. dcr->windows = 1;
  1606. dcr->window_size = DCR_SIZE;
  1607. dcr->command_offset = 0;
  1608. dcr->command_size = 8;
  1609. dcr->status_offset = 8;
  1610. dcr->status_size = 4;
  1611. offset += dcr->header.length;
  1612. /* dcr-descriptor1: blk */
  1613. dcr = nfit_buf + offset;
  1614. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1615. dcr->header.length = sizeof(*dcr);
  1616. dcr->region_index = 1+1;
  1617. dcr_common_init(dcr);
  1618. dcr->serial_number = ~handle[1];
  1619. dcr->code = NFIT_FIC_BLK;
  1620. dcr->windows = 1;
  1621. dcr->window_size = DCR_SIZE;
  1622. dcr->command_offset = 0;
  1623. dcr->command_size = 8;
  1624. dcr->status_offset = 8;
  1625. dcr->status_size = 4;
  1626. offset += dcr->header.length;
  1627. /* dcr-descriptor2: blk */
  1628. dcr = nfit_buf + offset;
  1629. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1630. dcr->header.length = sizeof(*dcr);
  1631. dcr->region_index = 2+1;
  1632. dcr_common_init(dcr);
  1633. dcr->serial_number = ~handle[2];
  1634. dcr->code = NFIT_FIC_BLK;
  1635. dcr->windows = 1;
  1636. dcr->window_size = DCR_SIZE;
  1637. dcr->command_offset = 0;
  1638. dcr->command_size = 8;
  1639. dcr->status_offset = 8;
  1640. dcr->status_size = 4;
  1641. offset += dcr->header.length;
  1642. /* dcr-descriptor3: blk */
  1643. dcr = nfit_buf + offset;
  1644. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1645. dcr->header.length = sizeof(*dcr);
  1646. dcr->region_index = 3+1;
  1647. dcr_common_init(dcr);
  1648. dcr->serial_number = ~handle[3];
  1649. dcr->code = NFIT_FIC_BLK;
  1650. dcr->windows = 1;
  1651. dcr->window_size = DCR_SIZE;
  1652. dcr->command_offset = 0;
  1653. dcr->command_size = 8;
  1654. dcr->status_offset = 8;
  1655. dcr->status_size = 4;
  1656. offset += dcr->header.length;
  1657. /* dcr-descriptor0: pmem */
  1658. dcr = nfit_buf + offset;
  1659. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1660. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1661. window_size);
  1662. dcr->region_index = 4+1;
  1663. dcr_common_init(dcr);
  1664. dcr->serial_number = ~handle[0];
  1665. dcr->code = NFIT_FIC_BYTEN;
  1666. dcr->windows = 0;
  1667. offset += dcr->header.length;
  1668. /* dcr-descriptor1: pmem */
  1669. dcr = nfit_buf + offset;
  1670. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1671. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1672. window_size);
  1673. dcr->region_index = 5+1;
  1674. dcr_common_init(dcr);
  1675. dcr->serial_number = ~handle[1];
  1676. dcr->code = NFIT_FIC_BYTEN;
  1677. dcr->windows = 0;
  1678. offset += dcr->header.length;
  1679. /* dcr-descriptor2: pmem */
  1680. dcr = nfit_buf + offset;
  1681. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1682. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1683. window_size);
  1684. dcr->region_index = 6+1;
  1685. dcr_common_init(dcr);
  1686. dcr->serial_number = ~handle[2];
  1687. dcr->code = NFIT_FIC_BYTEN;
  1688. dcr->windows = 0;
  1689. offset += dcr->header.length;
  1690. /* dcr-descriptor3: pmem */
  1691. dcr = nfit_buf + offset;
  1692. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1693. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1694. window_size);
  1695. dcr->region_index = 7+1;
  1696. dcr_common_init(dcr);
  1697. dcr->serial_number = ~handle[3];
  1698. dcr->code = NFIT_FIC_BYTEN;
  1699. dcr->windows = 0;
  1700. offset += dcr->header.length;
  1701. /* bdw0 (spa/dcr0, dimm0) */
  1702. bdw = nfit_buf + offset;
  1703. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1704. bdw->header.length = sizeof(*bdw);
  1705. bdw->region_index = 0+1;
  1706. bdw->windows = 1;
  1707. bdw->offset = 0;
  1708. bdw->size = BDW_SIZE;
  1709. bdw->capacity = DIMM_SIZE;
  1710. bdw->start_address = 0;
  1711. offset += bdw->header.length;
  1712. /* bdw1 (spa/dcr1, dimm1) */
  1713. bdw = nfit_buf + offset;
  1714. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1715. bdw->header.length = sizeof(*bdw);
  1716. bdw->region_index = 1+1;
  1717. bdw->windows = 1;
  1718. bdw->offset = 0;
  1719. bdw->size = BDW_SIZE;
  1720. bdw->capacity = DIMM_SIZE;
  1721. bdw->start_address = 0;
  1722. offset += bdw->header.length;
  1723. /* bdw2 (spa/dcr2, dimm2) */
  1724. bdw = nfit_buf + offset;
  1725. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1726. bdw->header.length = sizeof(*bdw);
  1727. bdw->region_index = 2+1;
  1728. bdw->windows = 1;
  1729. bdw->offset = 0;
  1730. bdw->size = BDW_SIZE;
  1731. bdw->capacity = DIMM_SIZE;
  1732. bdw->start_address = 0;
  1733. offset += bdw->header.length;
  1734. /* bdw3 (spa/dcr3, dimm3) */
  1735. bdw = nfit_buf + offset;
  1736. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1737. bdw->header.length = sizeof(*bdw);
  1738. bdw->region_index = 3+1;
  1739. bdw->windows = 1;
  1740. bdw->offset = 0;
  1741. bdw->size = BDW_SIZE;
  1742. bdw->capacity = DIMM_SIZE;
  1743. bdw->start_address = 0;
  1744. offset += bdw->header.length;
  1745. /* flush0 (dimm0) */
  1746. flush = nfit_buf + offset;
  1747. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1748. flush->header.length = flush_hint_size;
  1749. flush->device_handle = handle[0];
  1750. flush->hint_count = NUM_HINTS;
  1751. for (i = 0; i < NUM_HINTS; i++)
  1752. flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
  1753. offset += flush->header.length;
  1754. /* flush1 (dimm1) */
  1755. flush = nfit_buf + offset;
  1756. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1757. flush->header.length = flush_hint_size;
  1758. flush->device_handle = handle[1];
  1759. flush->hint_count = NUM_HINTS;
  1760. for (i = 0; i < NUM_HINTS; i++)
  1761. flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
  1762. offset += flush->header.length;
  1763. /* flush2 (dimm2) */
  1764. flush = nfit_buf + offset;
  1765. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1766. flush->header.length = flush_hint_size;
  1767. flush->device_handle = handle[2];
  1768. flush->hint_count = NUM_HINTS;
  1769. for (i = 0; i < NUM_HINTS; i++)
  1770. flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
  1771. offset += flush->header.length;
  1772. /* flush3 (dimm3) */
  1773. flush = nfit_buf + offset;
  1774. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1775. flush->header.length = flush_hint_size;
  1776. flush->device_handle = handle[3];
  1777. flush->hint_count = NUM_HINTS;
  1778. for (i = 0; i < NUM_HINTS; i++)
  1779. flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
  1780. offset += flush->header.length;
  1781. /* platform capabilities */
  1782. pcap = nfit_buf + offset;
  1783. pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
  1784. pcap->header.length = sizeof(*pcap);
  1785. pcap->highest_capability = 1;
  1786. pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
  1787. offset += pcap->header.length;
  1788. if (t->setup_hotplug) {
  1789. /* dcr-descriptor4: blk */
  1790. dcr = nfit_buf + offset;
  1791. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1792. dcr->header.length = sizeof(*dcr);
  1793. dcr->region_index = 8+1;
  1794. dcr_common_init(dcr);
  1795. dcr->serial_number = ~handle[4];
  1796. dcr->code = NFIT_FIC_BLK;
  1797. dcr->windows = 1;
  1798. dcr->window_size = DCR_SIZE;
  1799. dcr->command_offset = 0;
  1800. dcr->command_size = 8;
  1801. dcr->status_offset = 8;
  1802. dcr->status_size = 4;
  1803. offset += dcr->header.length;
  1804. /* dcr-descriptor4: pmem */
  1805. dcr = nfit_buf + offset;
  1806. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  1807. dcr->header.length = offsetof(struct acpi_nfit_control_region,
  1808. window_size);
  1809. dcr->region_index = 9+1;
  1810. dcr_common_init(dcr);
  1811. dcr->serial_number = ~handle[4];
  1812. dcr->code = NFIT_FIC_BYTEN;
  1813. dcr->windows = 0;
  1814. offset += dcr->header.length;
  1815. /* bdw4 (spa/dcr4, dimm4) */
  1816. bdw = nfit_buf + offset;
  1817. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  1818. bdw->header.length = sizeof(*bdw);
  1819. bdw->region_index = 8+1;
  1820. bdw->windows = 1;
  1821. bdw->offset = 0;
  1822. bdw->size = BDW_SIZE;
  1823. bdw->capacity = DIMM_SIZE;
  1824. bdw->start_address = 0;
  1825. offset += bdw->header.length;
  1826. /* spa10 (dcr4) dimm4 */
  1827. spa = nfit_buf + offset;
  1828. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1829. spa->header.length = sizeof(*spa);
  1830. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  1831. spa->range_index = 10+1;
  1832. spa->address = t->dcr_dma[4];
  1833. spa->length = DCR_SIZE;
  1834. offset += spa->header.length;
  1835. /*
  1836. * spa11 (single-dimm interleave for hotplug, note storage
  1837. * does not actually alias the related block-data-window
  1838. * regions)
  1839. */
  1840. spa = nfit_buf + offset;
  1841. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1842. spa->header.length = sizeof(*spa);
  1843. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  1844. spa->range_index = 11+1;
  1845. spa->address = t->spa_set_dma[2];
  1846. spa->length = SPA0_SIZE;
  1847. offset += spa->header.length;
  1848. /* spa12 (bdw for dcr4) dimm4 */
  1849. spa = nfit_buf + offset;
  1850. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  1851. spa->header.length = sizeof(*spa);
  1852. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  1853. spa->range_index = 12+1;
  1854. spa->address = t->dimm_dma[4];
  1855. spa->length = DIMM_SIZE;
  1856. offset += spa->header.length;
  1857. /* mem-region14 (spa/dcr4, dimm4) */
  1858. memdev = nfit_buf + offset;
  1859. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1860. memdev->header.length = sizeof(*memdev);
  1861. memdev->device_handle = handle[4];
  1862. memdev->physical_id = 4;
  1863. memdev->region_id = 0;
  1864. memdev->range_index = 10+1;
  1865. memdev->region_index = 8+1;
  1866. memdev->region_size = 0;
  1867. memdev->region_offset = 0;
  1868. memdev->address = 0;
  1869. memdev->interleave_index = 0;
  1870. memdev->interleave_ways = 1;
  1871. offset += memdev->header.length;
  1872. /* mem-region15 (spa11, dimm4) */
  1873. memdev = nfit_buf + offset;
  1874. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1875. memdev->header.length = sizeof(*memdev);
  1876. memdev->device_handle = handle[4];
  1877. memdev->physical_id = 4;
  1878. memdev->region_id = 0;
  1879. memdev->range_index = 11+1;
  1880. memdev->region_index = 9+1;
  1881. memdev->region_size = SPA0_SIZE;
  1882. memdev->region_offset = (1ULL << 48);
  1883. memdev->address = 0;
  1884. memdev->interleave_index = 0;
  1885. memdev->interleave_ways = 1;
  1886. memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
  1887. offset += memdev->header.length;
  1888. /* mem-region16 (spa/bdw4, dimm4) */
  1889. memdev = nfit_buf + offset;
  1890. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  1891. memdev->header.length = sizeof(*memdev);
  1892. memdev->device_handle = handle[4];
  1893. memdev->physical_id = 4;
  1894. memdev->region_id = 0;
  1895. memdev->range_index = 12+1;
  1896. memdev->region_index = 8+1;
  1897. memdev->region_size = 0;
  1898. memdev->region_offset = 0;
  1899. memdev->address = 0;
  1900. memdev->interleave_index = 0;
  1901. memdev->interleave_ways = 1;
  1902. offset += memdev->header.length;
  1903. /* flush3 (dimm4) */
  1904. flush = nfit_buf + offset;
  1905. flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
  1906. flush->header.length = flush_hint_size;
  1907. flush->device_handle = handle[4];
  1908. flush->hint_count = NUM_HINTS;
  1909. for (i = 0; i < NUM_HINTS; i++)
  1910. flush->hint_address[i] = t->flush_dma[4]
  1911. + i * sizeof(u64);
  1912. offset += flush->header.length;
  1913. /* sanity check to make sure we've filled the buffer */
  1914. WARN_ON(offset != t->nfit_size);
  1915. }
  1916. t->nfit_filled = offset;
  1917. post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
  1918. SPA0_SIZE);
  1919. acpi_desc = &t->acpi_desc;
  1920. set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
  1921. set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
  1922. set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
  1923. set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
  1924. set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
  1925. set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
  1926. set_bit(ND_INTEL_SMART_INJECT, &acpi_desc->dimm_cmd_force_en);
  1927. set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
  1928. set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
  1929. set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
  1930. set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
  1931. set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
  1932. set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
  1933. set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
  1934. set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
  1935. set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
  1936. set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
  1937. set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
  1938. set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
  1939. set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
  1940. set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
  1941. set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
  1942. }
/*
 * Populate the NFIT for test bus 1: a minimal platform with one flat
 * PMEM range (no block-aperture aliasing), a virtual-CD SPA, and two
 * DIMM interfaces used to exercise error paths -- one DIMM reporting
 * save/restore/flush/health failures, the other a failed map.
 */
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;
	offset += spa->header.length;

	/* virtual cd region (range_index == 0, i.e. not part of an interleave set) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;
	offset += spa->header.length;

	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[5];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	/* advertise a DIMM with every "unhappy" persistence flag raised */
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;
	offset += memdev->header.length;

	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	/* truncated DCR: only the fields up to window_size are emitted */
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[5];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* mem-region1 (no spa; presumably models a failed mapping -- note range_index == 0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[6];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0;
	memdev->region_index = 0+2;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
	offset += memdev->header.length;

	/* dcr-descriptor1 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+2;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[6];
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;
	offset += dcr->header.length;

	/* sanity check to make sure we've filled the buffer */
	WARN_ON(offset != t->nfit_size);
	t->nfit_filled = offset;

	/* seed an address-range-scrub result covering the pmem range */
	post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
			SPA2_SIZE);

	/* force-enable the command set this bus is expected to answer */
	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
}
  2040. static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
  2041. void *iobuf, u64 len, int rw)
  2042. {
  2043. struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
  2044. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
  2045. struct nd_region *nd_region = &ndbr->nd_region;
  2046. unsigned int lane;
  2047. lane = nd_region_acquire_lane(nd_region);
  2048. if (rw)
  2049. memcpy(mmio->addr.base + dpa, iobuf, len);
  2050. else {
  2051. memcpy(iobuf, mmio->addr.base + dpa, len);
  2052. /* give us some some coverage of the arch_invalidate_pmem() API */
  2053. arch_invalidate_pmem(mmio->addr.base + dpa, len);
  2054. }
  2055. nd_region_release_lane(nd_region, lane);
  2056. return 0;
  2057. }
  2058. static unsigned long nfit_ctl_handle;
  2059. union acpi_object *result;
  2060. static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
  2061. const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
  2062. {
  2063. if (handle != &nfit_ctl_handle)
  2064. return ERR_PTR(-ENXIO);
  2065. return result;
  2066. }
  2067. static int setup_result(void *buf, size_t size)
  2068. {
  2069. result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
  2070. if (!result)
  2071. return -ENOMEM;
  2072. result->package.type = ACPI_TYPE_BUFFER,
  2073. result->buffer.pointer = (void *) (result + 1);
  2074. result->buffer.length = size;
  2075. memcpy(result->buffer.pointer, buf, size);
  2076. memset(buf, 0, size);
  2077. return 0;
  2078. }
/*
 * Unit-test acpi_nfit_ctl() marshaling without any real hardware: build
 * throwaway acpi_device/acpi_nfit_desc/nfit_mem/nvdimm objects, stage
 * canned DSM results via setup_result(), and check that each command's
 * status/extended-status/output-length handling behaves as expected.
 *
 * Returns 0 when every sub-test passes, -ENOMEM on allocation failure,
 * or -EIO on the first sub-test mismatch.
 */
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	/* one scratch area reused for every command payload */
	union {
		struct nd_cmd_get_config_size cfg_size;
		struct nd_cmd_clear_error clear_err;
		struct nd_cmd_ars_status ars_stat;
		struct nd_cmd_ars_cap ars_cap;
		char buf[sizeof(struct nd_cmd_ars_status)
			+ sizeof(struct nd_ars_record)];
	} cmds;

	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	/* handle points at the sentinel nfit_test_evaluate_dsm() accepts */
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR
				| 1UL << ND_CMD_CALL,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
			.bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
				| 1UL << NFIT_CMD_ARS_INJECT_SET
				| 1UL << NFIT_CMD_ARS_INJECT_CLEAR
				| 1UL << NFIT_CMD_ARS_INJECT_GET,
		},
		.dev = &adev->dev,
	};

	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	/* per-dimm command set used for both the dsm and nvdimm masks */
	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};

	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);
	/* the staged values must round-trip unchanged through the ctl path */
	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
			|| cmds.cfg_size.config_size != SZ_128K
			|| cmds.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmds.ars_cap);
	cmds.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	/* stage only the output fields; the input fields stay caller-owned */
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmds.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size - 4,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);
	/* the trailing record must survive the size interpretation */
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);
	/* a non-zero extended status must surface as a negative cmd_rc */
	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test clear error */
	cmd_size = sizeof(cmds.clear_err);
	cmds.clear_err = (struct nd_cmd_clear_error) {
		.length = 512,
		.cleared = 512,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}
  2269. static int nfit_test_probe(struct platform_device *pdev)
  2270. {
  2271. struct nvdimm_bus_descriptor *nd_desc;
  2272. struct acpi_nfit_desc *acpi_desc;
  2273. struct device *dev = &pdev->dev;
  2274. struct nfit_test *nfit_test;
  2275. struct nfit_mem *nfit_mem;
  2276. union acpi_object *obj;
  2277. int rc;
  2278. if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
  2279. rc = nfit_ctl_test(&pdev->dev);
  2280. if (rc)
  2281. return rc;
  2282. }
  2283. nfit_test = to_nfit_test(&pdev->dev);
  2284. /* common alloc */
  2285. if (nfit_test->num_dcr) {
  2286. int num = nfit_test->num_dcr;
  2287. nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
  2288. GFP_KERNEL);
  2289. nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
  2290. GFP_KERNEL);
  2291. nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
  2292. GFP_KERNEL);
  2293. nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
  2294. GFP_KERNEL);
  2295. nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
  2296. GFP_KERNEL);
  2297. nfit_test->label_dma = devm_kcalloc(dev, num,
  2298. sizeof(dma_addr_t), GFP_KERNEL);
  2299. nfit_test->dcr = devm_kcalloc(dev, num,
  2300. sizeof(struct nfit_test_dcr *), GFP_KERNEL);
  2301. nfit_test->dcr_dma = devm_kcalloc(dev, num,
  2302. sizeof(dma_addr_t), GFP_KERNEL);
  2303. nfit_test->smart = devm_kcalloc(dev, num,
  2304. sizeof(struct nd_intel_smart), GFP_KERNEL);
  2305. nfit_test->smart_threshold = devm_kcalloc(dev, num,
  2306. sizeof(struct nd_intel_smart_threshold),
  2307. GFP_KERNEL);
  2308. nfit_test->fw = devm_kcalloc(dev, num,
  2309. sizeof(struct nfit_test_fw), GFP_KERNEL);
  2310. if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
  2311. && nfit_test->label_dma && nfit_test->dcr
  2312. && nfit_test->dcr_dma && nfit_test->flush
  2313. && nfit_test->flush_dma
  2314. && nfit_test->fw)
  2315. /* pass */;
  2316. else
  2317. return -ENOMEM;
  2318. }
  2319. if (nfit_test->num_pm) {
  2320. int num = nfit_test->num_pm;
  2321. nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
  2322. GFP_KERNEL);
  2323. nfit_test->spa_set_dma = devm_kcalloc(dev, num,
  2324. sizeof(dma_addr_t), GFP_KERNEL);
  2325. if (nfit_test->spa_set && nfit_test->spa_set_dma)
  2326. /* pass */;
  2327. else
  2328. return -ENOMEM;
  2329. }
  2330. /* per-nfit specific alloc */
  2331. if (nfit_test->alloc(nfit_test))
  2332. return -ENOMEM;
  2333. nfit_test->setup(nfit_test);
  2334. acpi_desc = &nfit_test->acpi_desc;
  2335. acpi_nfit_desc_init(acpi_desc, &pdev->dev);
  2336. acpi_desc->blk_do_io = nfit_test_blk_do_io;
  2337. nd_desc = &acpi_desc->nd_desc;
  2338. nd_desc->provider_name = NULL;
  2339. nd_desc->module = THIS_MODULE;
  2340. nd_desc->ndctl = nfit_test_ctl;
  2341. rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
  2342. nfit_test->nfit_filled);
  2343. if (rc)
  2344. return rc;
  2345. rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
  2346. if (rc)
  2347. return rc;
  2348. if (nfit_test->setup != nfit_test0_setup)
  2349. return 0;
  2350. nfit_test->setup_hotplug = 1;
  2351. nfit_test->setup(nfit_test);
  2352. obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  2353. if (!obj)
  2354. return -ENOMEM;
  2355. obj->type = ACPI_TYPE_BUFFER;
  2356. obj->buffer.length = nfit_test->nfit_size;
  2357. obj->buffer.pointer = nfit_test->nfit_buf;
  2358. *(nfit_test->_fit) = obj;
  2359. __acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);
  2360. /* associate dimm devices with nfit_mem data for notification testing */
  2361. mutex_lock(&acpi_desc->init_mutex);
  2362. list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
  2363. u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
  2364. int i;
  2365. for (i = 0; i < ARRAY_SIZE(handle); i++)
  2366. if (nfit_handle == handle[i])
  2367. dev_set_drvdata(nfit_test->dimm_dev[i],
  2368. nfit_mem);
  2369. }
  2370. mutex_unlock(&acpi_desc->init_mutex);
  2371. return 0;
  2372. }
  2373. static int nfit_test_remove(struct platform_device *pdev)
  2374. {
  2375. return 0;
  2376. }
  2377. static void nfit_test_release(struct device *dev)
  2378. {
  2379. struct nfit_test *nfit_test = to_nfit_test(dev);
  2380. kfree(nfit_test);
  2381. }
/* Id table matching the platform devices registered in nfit_test_init(). */
static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};

/* Driver backing the synthetic "nfit_test.N" platform devices. */
static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};
/* Page-aligned scratch page used by mcsafe_test() copy experiments. */
static char mcsafe_buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Which side of the copy, if any, gets an injected fault. */
enum INJECT {
	INJECT_NONE,	/* no fault injection */
	INJECT_SRC,	/* fault on reads from the source */
	INJECT_DST,	/* fault on writes to the destination */
};
  2400. static void mcsafe_test_init(char *dst, char *src, size_t size)
  2401. {
  2402. size_t i;
  2403. memset(dst, 0xff, size);
  2404. for (i = 0; i < size; i++)
  2405. src[i] = (char) i;
  2406. }
  2407. static bool mcsafe_test_validate(unsigned char *dst, unsigned char *src,
  2408. size_t size, unsigned long rem)
  2409. {
  2410. size_t i;
  2411. for (i = 0; i < size - rem; i++)
  2412. if (dst[i] != (unsigned char) i) {
  2413. pr_info_once("%s:%d: offset: %zd got: %#x expect: %#x\n",
  2414. __func__, __LINE__, i, dst[i],
  2415. (unsigned char) i);
  2416. return false;
  2417. }
  2418. for (i = size - rem; i < size; i++)
  2419. if (dst[i] != 0xffU) {
  2420. pr_info_once("%s:%d: offset: %zd got: %#x expect: 0xff\n",
  2421. __func__, __LINE__, i, dst[i]);
  2422. return false;
  2423. }
  2424. return true;
  2425. }
/*
 * Exercise __memcpy_mcsafe() fault handling: for each injection mode
 * (none, poisoned source, poisoned destination) slide a 512-byte copy
 * window one byte at a time across the injection point and verify both
 * the returned remainder and the resulting buffer contents.
 */
void mcsafe_test(void)
{
	char *inject_desc[] = { "none", "source", "destination" };
	enum INJECT inj;

	if (IS_ENABLED(CONFIG_MCSAFE_TEST)) {
		pr_info("%s: run...\n", __func__);
	} else {
		pr_info("%s: disabled, skip.\n", __func__);
		return;
	}

	for (inj = INJECT_NONE; inj <= INJECT_DST; inj++) {
		int i;

		pr_info("%s: inject: %s\n", __func__, inject_desc[inj]);
		for (i = 0; i < 512; i++) {
			unsigned long expect, rem;
			void *src, *dst;
			bool valid;

			switch (inj) {
			case INJECT_NONE:
				/* no poison anywhere: full copy must succeed */
				mcsafe_inject_src(NULL);
				mcsafe_inject_dst(NULL);
				dst = &mcsafe_buf[2048];
				src = &mcsafe_buf[1024 - i];
				expect = 0;
				break;
			case INJECT_SRC:
				/*
				 * Poison at source offset 1024; the copy
				 * starts i bytes earlier, so the first i
				 * bytes transfer and 512 - i are expected
				 * to remain.
				 */
				mcsafe_inject_src(&mcsafe_buf[1024]);
				mcsafe_inject_dst(NULL);
				dst = &mcsafe_buf[2048];
				src = &mcsafe_buf[1024 - i];
				expect = 512 - i;
				break;
			case INJECT_DST:
				/* same sliding scheme, poison on the write side */
				mcsafe_inject_src(NULL);
				mcsafe_inject_dst(&mcsafe_buf[2048]);
				dst = &mcsafe_buf[2048 - i];
				src = &mcsafe_buf[1024];
				expect = 512 - i;
				break;
			}

			mcsafe_test_init(dst, src, 512);
			rem = __memcpy_mcsafe(dst, src, 512);
			valid = mcsafe_test_validate(dst, src, 512, expect);
			if (rem == expect && valid)
				continue;
			/* only reached on mismatch: dump the experiment */
			pr_info("%s: copy(%#lx, %#lx, %d) off: %d rem: %ld %s expect: %ld\n",
					__func__,
					((unsigned long) dst) & ~PAGE_MASK,
					((unsigned long ) src) & ~PAGE_MASK,
					512, i, rem, valid ? "valid" : "bad",
					expect);
		}
	}

	/* leave no injection state armed for later callers */
	mcsafe_inject_src(NULL);
	mcsafe_inject_dst(NULL);
}
  2482. static __init int nfit_test_init(void)
  2483. {
  2484. int rc, i;
  2485. pmem_test();
  2486. libnvdimm_test();
  2487. acpi_nfit_test();
  2488. device_dax_test();
  2489. mcsafe_test();
  2490. nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
  2491. nfit_wq = create_singlethread_workqueue("nfit");
  2492. if (!nfit_wq)
  2493. return -ENOMEM;
  2494. nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
  2495. if (IS_ERR(nfit_test_dimm)) {
  2496. rc = PTR_ERR(nfit_test_dimm);
  2497. goto err_register;
  2498. }
  2499. nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
  2500. if (!nfit_pool) {
  2501. rc = -ENOMEM;
  2502. goto err_register;
  2503. }
  2504. if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
  2505. rc = -ENOMEM;
  2506. goto err_register;
  2507. }
  2508. for (i = 0; i < NUM_NFITS; i++) {
  2509. struct nfit_test *nfit_test;
  2510. struct platform_device *pdev;
  2511. nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
  2512. if (!nfit_test) {
  2513. rc = -ENOMEM;
  2514. goto err_register;
  2515. }
  2516. INIT_LIST_HEAD(&nfit_test->resources);
  2517. badrange_init(&nfit_test->badrange);
  2518. switch (i) {
  2519. case 0:
  2520. nfit_test->num_pm = NUM_PM;
  2521. nfit_test->dcr_idx = 0;
  2522. nfit_test->num_dcr = NUM_DCR;
  2523. nfit_test->alloc = nfit_test0_alloc;
  2524. nfit_test->setup = nfit_test0_setup;
  2525. break;
  2526. case 1:
  2527. nfit_test->num_pm = 2;
  2528. nfit_test->dcr_idx = NUM_DCR;
  2529. nfit_test->num_dcr = 2;
  2530. nfit_test->alloc = nfit_test1_alloc;
  2531. nfit_test->setup = nfit_test1_setup;
  2532. break;
  2533. default:
  2534. rc = -EINVAL;
  2535. goto err_register;
  2536. }
  2537. pdev = &nfit_test->pdev;
  2538. pdev->name = KBUILD_MODNAME;
  2539. pdev->id = i;
  2540. pdev->dev.release = nfit_test_release;
  2541. rc = platform_device_register(pdev);
  2542. if (rc) {
  2543. put_device(&pdev->dev);
  2544. goto err_register;
  2545. }
  2546. get_device(&pdev->dev);
  2547. rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  2548. if (rc)
  2549. goto err_register;
  2550. instances[i] = nfit_test;
  2551. INIT_WORK(&nfit_test->work, uc_error_notify);
  2552. }
  2553. rc = platform_driver_register(&nfit_test_driver);
  2554. if (rc)
  2555. goto err_register;
  2556. return 0;
  2557. err_register:
  2558. if (nfit_pool)
  2559. gen_pool_destroy(nfit_pool);
  2560. destroy_workqueue(nfit_wq);
  2561. for (i = 0; i < NUM_NFITS; i++)
  2562. if (instances[i])
  2563. platform_device_unregister(&instances[i]->pdev);
  2564. nfit_test_teardown();
  2565. for (i = 0; i < NUM_NFITS; i++)
  2566. if (instances[i])
  2567. put_device(&instances[i]->pdev.dev);
  2568. return rc;
  2569. }
/*
 * Module exit: tear down in reverse dependency order — stop async
 * notifications, remove the devices, then the driver, then the shared
 * resources and references.
 */
static __exit void nfit_test_exit(void)
{
	int i;

	/*
	 * Drain pending uc_error_notify work before the devices it
	 * targets go away. NOTE(review): destroy_workqueue() drains
	 * too, so the explicit flush appears redundant but harmless.
	 */
	flush_workqueue(nfit_wq);
	destroy_workqueue(nfit_wq);
	/* unregister devices before the driver that binds them */
	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	platform_driver_unregister(&nfit_test_driver);
	nfit_test_teardown();

	gen_pool_destroy(nfit_pool);
	/* drop the extra references taken in nfit_test_init() */
	for (i = 0; i < NUM_NFITS; i++)
		put_device(&instances[i]->pdev.dev);
	class_destroy(nfit_test_dimm);
}
/* Module entry/exit hooks and metadata. */
module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");