main.c
  1. /*
  2. * CXL Flash Device Driver
  3. *
  4. * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
  5. * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
  6. *
  7. * Copyright (C) 2015 IBM Corporation
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU General Public License
  11. * as published by the Free Software Foundation; either version
  12. * 2 of the License, or (at your option) any later version.
  13. */
  14. #include <linux/delay.h>
  15. #include <linux/list.h>
  16. #include <linux/module.h>
  17. #include <linux/pci.h>
  18. #include <asm/unaligned.h>
  19. #include <misc/cxl.h>
  20. #include <scsi/scsi_cmnd.h>
  21. #include <scsi/scsi_host.h>
  22. #include <uapi/scsi/cxlflash_ioctl.h>
  23. #include "main.h"
  24. #include "sislite.h"
  25. #include "common.h"
  26. MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
  27. MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
  28. MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
  29. MODULE_LICENSE("GPL");
  30. /**
  31. * process_cmd_err() - command error handler
  32. * @cmd: AFU command that experienced the error.
  33. * @scp: SCSI command associated with the AFU command in error.
  34. *
  35. * Translates error bits from AFU command to SCSI command results.
  36. */
  37. static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
  38. {
  39. struct afu *afu = cmd->parent;
  40. struct cxlflash_cfg *cfg = afu->parent;
  41. struct device *dev = &cfg->dev->dev;
  42. struct sisl_ioasa *ioasa = &(cmd->sa);
  43. u32 resid;
  49. if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
  50. resid = ioasa->resid;
  51. scsi_set_resid(scp, resid);
  52. dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
  53. __func__, cmd, scp, resid);
  54. }
  55. if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
  56. dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
  57. __func__, cmd, scp);
  58. scp->result = (DID_ERROR << 16);
  59. }
  60. dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
  61. "afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
  62. ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
  63. ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
  64. if (ioasa->rc.scsi_rc) {
  65. /* We have a SCSI status */
  66. if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
  67. memcpy(scp->sense_buffer, ioasa->sense_data,
  68. SISL_SENSE_DATA_LEN);
  69. scp->result = ioasa->rc.scsi_rc;
  70. } else
  71. scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
  72. }
  73. /*
  74. * We encountered an error. Set scp->result based on nature
  75. * of error.
  76. */
  77. if (ioasa->rc.fc_rc) {
  78. /* We have an FC status */
  79. switch (ioasa->rc.fc_rc) {
  80. case SISL_FC_RC_LINKDOWN:
  81. scp->result = (DID_REQUEUE << 16);
  82. break;
  83. case SISL_FC_RC_RESID:
  84. /* This indicates an FCP resid underrun */
  85. if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
  86. /* If the SISL_RC_FLAGS_OVERRUN flag was set,
  87. * then we will handle this error elsewhere.
  88. * If not, then we must handle it here.
  89. * This is probably an AFU bug.
  90. */
  91. scp->result = (DID_ERROR << 16);
  92. }
  93. break;
  94. case SISL_FC_RC_RESIDERR:
  95. /* Resid mismatch between adapter and device */
  96. case SISL_FC_RC_TGTABORT:
  97. case SISL_FC_RC_ABORTOK:
  98. case SISL_FC_RC_ABORTFAIL:
  99. case SISL_FC_RC_NOLOGI:
  100. case SISL_FC_RC_ABORTPEND:
  101. case SISL_FC_RC_WRABORTPEND:
  102. case SISL_FC_RC_NOEXP:
  103. case SISL_FC_RC_INUSE:
  104. scp->result = (DID_ERROR << 16);
  105. break;
  106. }
  107. }
  108. if (ioasa->rc.afu_rc) {
  109. /* We have an AFU error */
  110. switch (ioasa->rc.afu_rc) {
  111. case SISL_AFU_RC_NO_CHANNELS:
  112. scp->result = (DID_NO_CONNECT << 16);
  113. break;
  114. case SISL_AFU_RC_DATA_DMA_ERR:
  115. switch (ioasa->afu_extra) {
  116. case SISL_AFU_DMA_ERR_PAGE_IN:
  117. /* Retry */
  118. scp->result = (DID_IMM_RETRY << 16);
  119. break;
  120. case SISL_AFU_DMA_ERR_INVALID_EA:
  121. default:
  122. scp->result = (DID_ERROR << 16);
  123. }
  124. break;
  125. case SISL_AFU_RC_OUT_OF_DATA_BUFS:
  126. /* Retry */
  127. scp->result = (DID_ALLOC_FAILURE << 16);
  128. break;
  129. default:
  130. scp->result = (DID_ERROR << 16);
  131. }
  132. }
  133. }
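/*
 * A note on the result encoding used above: scp->result is a packed 32-bit
 * status word in which the host byte occupies bits 16-23, which is why the
 * DID_* codes are shifted left by 16 throughout this routine. As a rough
 * equivalent (a sketch, not what this driver uses), the SCSI midlayer helper
 * expresses the same thing while preserving the other status bytes:
 *
 *	set_host_byte(scp, DID_ERROR);
 *	which amounts to:
 *	scp->result = (scp->result & 0xff00ffff) | (DID_ERROR << 16);
 */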
  134. /**
  135. * cmd_complete() - command completion handler
  136. * @cmd: AFU command that has completed.
  137. *
  138. * For commands issued on behalf of the SCSI stack (cmd->scp populated),
  139. * translates the completion status and returns the command to the SCSI
  140. * midlayer; internal AFU commands are signaled via their completion event.
  141. */
  142. static void cmd_complete(struct afu_cmd *cmd)
  143. {
  144. struct scsi_cmnd *scp;
  145. ulong lock_flags;
  146. struct afu *afu = cmd->parent;
  147. struct cxlflash_cfg *cfg = afu->parent;
  148. struct device *dev = &cfg->dev->dev;
  149. bool cmd_is_tmf;
  150. if (cmd->scp) {
  151. scp = cmd->scp;
  152. if (unlikely(cmd->sa.ioasc))
  153. process_cmd_err(cmd, scp);
  154. else
  155. scp->result = (DID_OK << 16);
  156. cmd_is_tmf = cmd->cmd_tmf;
  157. dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
  158. __func__, scp, scp->result, cmd->sa.ioasc);
  159. scsi_dma_unmap(scp);
  160. scp->scsi_done(scp);
  161. if (cmd_is_tmf) {
  162. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  163. cfg->tmf_active = false;
  164. wake_up_all_locked(&cfg->tmf_waitq);
  165. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  166. }
  167. } else
  168. complete(&cmd->cevent);
  169. }
  170. /**
  171. * context_reset() - reset command owner context via specified register
  172. * @cmd: AFU command that timed out.
  173. * @reset_reg: MMIO register to perform reset.
  174. */
  175. static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
  176. {
  177. int nretry = 0;
  178. u64 rrin = 0x1;
  179. struct afu *afu = cmd->parent;
  180. struct cxlflash_cfg *cfg = afu->parent;
  181. struct device *dev = &cfg->dev->dev;
  182. dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);
  183. writeq_be(rrin, reset_reg);
  184. do {
  185. rrin = readq_be(reset_reg);
  186. if (rrin != 0x1)
  187. break;
  188. /* Double delay each time */
  189. udelay(1 << nretry);
  190. } while (nretry++ < MC_ROOM_RETRY_CNT);
  191. dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
  192. __func__, rrin, nretry);
  193. }
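/*
 * The loop above is a bounded exponential backoff: the udelay doubles on
 * every pass (1us, 2us, 4us, ...), so the worst-case busy-wait before giving
 * up is sum(2^k) for k = 0..MC_ROOM_RETRY_CNT, i.e.
 * 2^(MC_ROOM_RETRY_CNT + 1) - 1 microseconds. As a rough example, if
 * MC_ROOM_RETRY_CNT were defined as 10 in main.h, the reset poll would spin
 * for at most ~2ms before logging the final rrin value and returning.
 */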
  194. /**
  195. * context_reset_ioarrin() - reset command owner context via IOARRIN register
  196. * @cmd: AFU command that timed out.
  197. */
  198. static void context_reset_ioarrin(struct afu_cmd *cmd)
  199. {
  200. struct afu *afu = cmd->parent;
  201. context_reset(cmd, &afu->host_map->ioarrin);
  202. }
  203. /**
  204. * context_reset_sq() - reset command owner context w/ SQ Context Reset register
  205. * @cmd: AFU command that timed out.
  206. */
  207. static void context_reset_sq(struct afu_cmd *cmd)
  208. {
  209. struct afu *afu = cmd->parent;
  210. context_reset(cmd, &afu->host_map->sq_ctx_reset);
  211. }
  212. /**
  213. * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
  214. * @afu: AFU associated with the host.
  215. * @cmd: AFU command to send.
  216. *
  217. * Return:
  218. * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  219. */
  220. static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
  221. {
  222. struct cxlflash_cfg *cfg = afu->parent;
  223. struct device *dev = &cfg->dev->dev;
  224. int rc = 0;
  225. s64 room;
  226. ulong lock_flags;
  227. /*
  228. * To avoid the performance penalty of MMIO, spread the update of
  229. * 'room' over multiple commands.
  230. */
  231. spin_lock_irqsave(&afu->rrin_slock, lock_flags);
  232. if (--afu->room < 0) {
  233. room = readq_be(&afu->host_map->cmd_room);
  234. if (room <= 0) {
  235. dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
  236. "0x%02X, room=0x%016llX\n",
  237. __func__, cmd->rcb.cdb[0], room);
  238. afu->room = 0;
  239. rc = SCSI_MLQUEUE_HOST_BUSY;
  240. goto out;
  241. }
  242. afu->room = room - 1;
  243. }
  244. writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
  245. out:
  246. spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
  247. dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
  248. cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
  249. return rc;
  250. }
  251. /**
  252. * send_cmd_sq() - sends an AFU command via SQ ring
  253. * @afu: AFU associated with the host.
  254. * @cmd: AFU command to send.
  255. *
  256. * Return:
  257. * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  258. */
  259. static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
  260. {
  261. struct cxlflash_cfg *cfg = afu->parent;
  262. struct device *dev = &cfg->dev->dev;
  263. int rc = 0;
  264. int newval;
  265. ulong lock_flags;
  266. newval = atomic_dec_if_positive(&afu->hsq_credits);
  267. if (newval <= 0) {
  268. rc = SCSI_MLQUEUE_HOST_BUSY;
  269. goto out;
  270. }
  271. cmd->rcb.ioasa = &cmd->sa;
  272. spin_lock_irqsave(&afu->hsq_slock, lock_flags);
  273. *afu->hsq_curr = cmd->rcb;
  274. if (afu->hsq_curr < afu->hsq_end)
  275. afu->hsq_curr++;
  276. else
  277. afu->hsq_curr = afu->hsq_start;
  278. writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail);
  279. spin_unlock_irqrestore(&afu->hsq_slock, lock_flags);
  280. out:
  281. dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
  282. "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
  283. cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr,
  284. readq_be(&afu->host_map->sq_head),
  285. readq_be(&afu->host_map->sq_tail));
  286. return rc;
  287. }
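/*
 * The SQ submit path is a credit-based single-producer ring: a credit is
 * taken with atomic_dec_if_positive() before the slot is written, the new
 * tail is published to the AFU via MMIO, and the credit is only returned
 * when cxlflash_rrq_irq() sees the completion. A stripped-down sketch of the
 * same pattern, using hypothetical names rather than this driver's
 * structures:
 *
 *	if (atomic_dec_if_positive(&ring->credits) < 0)
 *		return -EBUSY;	(ring full, caller retries later)
 *	spin_lock_irqsave(&ring->lock, flags);
 *	*ring->curr = *entry;
 *	ring->curr = (ring->curr < ring->end) ? ring->curr + 1 : ring->start;
 *	writeq_be((u64)ring->curr, tail_reg);
 *	spin_unlock_irqrestore(&ring->lock, flags);
 */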
  288. /**
  289. * wait_resp() - polls for a response or timeout to a sent AFU command
  290. * @afu: AFU associated with the host.
  291. * @cmd: AFU command that was sent.
  292. *
  293. * Return:
  294. * 0 on success, -1 on timeout/error
  295. */
  296. static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
  297. {
  298. struct cxlflash_cfg *cfg = afu->parent;
  299. struct device *dev = &cfg->dev->dev;
  300. int rc = 0;
  301. ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
  302. timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
  303. if (!timeout) {
  304. afu->context_reset(cmd);
  305. rc = -1;
  306. }
  307. if (unlikely(cmd->sa.ioasc != 0)) {
  308. dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
  309. __func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
  310. rc = -1;
  311. }
  312. return rc;
  313. }
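/*
 * The completion wait above is sized from the command itself:
 * msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000) converts rcb.timeout into
 * jiffies at twice its value taken as seconds. For example, a command sent
 * with rcb.timeout = 5 is waited on for ten seconds before the owning
 * context is reset; either a timeout or a non-zero ioasc causes -1 to be
 * returned to the caller.
 */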
  314. /**
  315. * send_tmf() - sends a Task Management Function (TMF)
  316. * @afu: AFU to checkout from.
  317. * @scp: SCSI command from stack.
  318. * @tmfcmd: TMF command to send.
  319. *
  320. * Return:
  321. * 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  322. */
  323. static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
  324. {
  325. u32 port_sel = scp->device->channel + 1;
  326. struct cxlflash_cfg *cfg = shost_priv(scp->device->host);
  327. struct afu_cmd *cmd = sc_to_afucz(scp);
  328. struct device *dev = &cfg->dev->dev;
  329. ulong lock_flags;
  330. int rc = 0;
  331. ulong to;
  332. /* When a Task Management Function is active, do not send another */
  333. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  334. if (cfg->tmf_active)
  335. wait_event_interruptible_lock_irq(cfg->tmf_waitq,
  336. !cfg->tmf_active,
  337. cfg->tmf_slock);
  338. cfg->tmf_active = true;
  339. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  340. cmd->scp = scp;
  341. cmd->parent = afu;
  342. cmd->cmd_tmf = true;
  343. cmd->rcb.ctx_id = afu->ctx_hndl;
  344. cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
  345. cmd->rcb.port_sel = port_sel;
  346. cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
  347. cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
  348. SISL_REQ_FLAGS_SUP_UNDERRUN |
  349. SISL_REQ_FLAGS_TMF_CMD);
  350. memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
  351. rc = afu->send_cmd(afu, cmd);
  352. if (unlikely(rc)) {
  353. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  354. cfg->tmf_active = false;
  355. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  356. goto out;
  357. }
  358. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  359. to = msecs_to_jiffies(5000);
  360. to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
  361. !cfg->tmf_active,
  362. cfg->tmf_slock,
  363. to);
  364. if (!to) {
  365. cfg->tmf_active = false;
  366. dev_err(dev, "%s: TMF timed out\n", __func__);
  367. rc = -1;
  368. }
  369. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  370. out:
  371. return rc;
  372. }
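/*
 * TMF serialization is a simple flag-plus-waitqueue protocol guarded by
 * tmf_slock: send_tmf() marks tmf_active under the lock, cxlflash_remove()
 * sleeps on tmf_waitq until any prior TMF completes, cxlflash_queuecommand()
 * simply returns SCSI_MLQUEUE_HOST_BUSY while the flag is set, and
 * cmd_complete() clears the flag and wakes all waiters once the TMF response
 * arrives. The net effect is that at most one TMF is outstanding and no
 * regular I/O is queued while it is in flight.
 */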
  373. /**
  374. * cxlflash_driver_info() - information handler for this host driver
  375. * @host: SCSI host associated with device.
  376. *
  377. * Return: A string describing the device.
  378. */
  379. static const char *cxlflash_driver_info(struct Scsi_Host *host)
  380. {
  381. return CXLFLASH_ADAPTER_NAME;
  382. }
  383. /**
  384. * cxlflash_queuecommand() - sends a mid-layer request
  385. * @host: SCSI host associated with device.
  386. * @scp: SCSI command to send.
  387. *
  388. * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  389. */
  390. static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
  391. {
  392. struct cxlflash_cfg *cfg = shost_priv(host);
  393. struct afu *afu = cfg->afu;
  394. struct device *dev = &cfg->dev->dev;
  395. struct afu_cmd *cmd = sc_to_afucz(scp);
  396. struct scatterlist *sg = scsi_sglist(scp);
  397. u32 port_sel = scp->device->channel + 1;
  398. u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
  399. ulong lock_flags;
  400. int nseg = 0;
  401. int rc = 0;
  402. dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
  403. "cdb=(%08x-%08x-%08x-%08x)\n",
  404. __func__, scp, host->host_no, scp->device->channel,
  405. scp->device->id, scp->device->lun,
  406. get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
  407. get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
  408. get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
  409. get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
  410. /*
  411. * If a Task Management Function is active, wait for it to complete
  412. * before continuing with regular commands.
  413. */
  414. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  415. if (cfg->tmf_active) {
  416. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  417. rc = SCSI_MLQUEUE_HOST_BUSY;
  418. goto out;
  419. }
  420. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  421. switch (cfg->state) {
  422. case STATE_RESET:
  423. dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
  424. rc = SCSI_MLQUEUE_HOST_BUSY;
  425. goto out;
  426. case STATE_FAILTERM:
  427. dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
  428. scp->result = (DID_NO_CONNECT << 16);
  429. scp->scsi_done(scp);
  430. rc = 0;
  431. goto out;
  432. default:
  433. break;
  434. }
  435. if (likely(sg)) {
  436. nseg = scsi_dma_map(scp);
  437. if (unlikely(nseg < 0)) {
  438. dev_err(dev, "%s: Fail DMA map\n", __func__);
  439. rc = SCSI_MLQUEUE_HOST_BUSY;
  440. goto out;
  441. }
  442. cmd->rcb.data_len = sg_dma_len(sg);
  443. cmd->rcb.data_ea = sg_dma_address(sg);
  444. }
  445. cmd->scp = scp;
  446. cmd->parent = afu;
  447. cmd->rcb.ctx_id = afu->ctx_hndl;
  448. cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
  449. cmd->rcb.port_sel = port_sel;
  450. cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
  451. if (scp->sc_data_direction == DMA_TO_DEVICE)
  452. req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
  453. cmd->rcb.req_flags = req_flags;
  454. memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
  455. rc = afu->send_cmd(afu, cmd);
  456. if (unlikely(rc))
  457. scsi_dma_unmap(scp);
  458. out:
  459. return rc;
  460. }
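/*
 * Note that only the first scatter-gather element is programmed into the
 * IOARCB (data_len/data_ea come straight from scsi_sglist(scp)). This is
 * sound only because the SCSI host template for this driver limits each
 * request to a single DMA segment (sg_tablesize of 1); if that assumption
 * ever changed, the request would need to be split or the AFU given a
 * scatter-gather capable descriptor.
 */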
  461. /**
  462. * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
  463. * @cfg: Internal structure associated with the host.
  464. */
  465. static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
  466. {
  467. struct pci_dev *pdev = cfg->dev;
  468. if (pci_channel_offline(pdev))
  469. wait_event_timeout(cfg->reset_waitq,
  470. !pci_channel_offline(pdev),
  471. CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
  472. }
  473. /**
  474. * free_mem() - free memory associated with the AFU
  475. * @cfg: Internal structure associated with the host.
  476. */
  477. static void free_mem(struct cxlflash_cfg *cfg)
  478. {
  479. struct afu *afu = cfg->afu;
  480. if (cfg->afu) {
  481. free_pages((ulong)afu, get_order(sizeof(struct afu)));
  482. cfg->afu = NULL;
  483. }
  484. }
  485. /**
  486. * stop_afu() - stops the AFU command timers and unmaps the MMIO space
  487. * @cfg: Internal structure associated with the host.
  488. *
  489. * Safe to call with AFU in a partially allocated/initialized state.
  490. *
  491. * Cancels scheduled worker threads, waits for any active internal AFU
  492. * commands to timeout and then unmaps the MMIO space.
  493. */
  494. static void stop_afu(struct cxlflash_cfg *cfg)
  495. {
  496. struct afu *afu = cfg->afu;
  497. cancel_work_sync(&cfg->work_q);
  498. if (likely(afu)) {
  499. while (atomic_read(&afu->cmds_active))
  500. ssleep(1);
  501. if (likely(afu->afu_map)) {
  502. cxl_psa_unmap((void __iomem *)afu->afu_map);
  503. afu->afu_map = NULL;
  504. }
  505. }
  506. }
  507. /**
  508. * term_intr() - disables all AFU interrupts
  509. * @cfg: Internal structure associated with the host.
  510. * @level: Depth of allocation, where to begin waterfall tear down.
  511. *
  512. * Safe to call with AFU/MC in partially allocated/initialized state.
  513. */
  514. static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
  515. {
  516. struct afu *afu = cfg->afu;
  517. struct device *dev = &cfg->dev->dev;
  518. if (!afu || !cfg->mcctx) {
  519. dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
  520. return;
  521. }
  522. switch (level) {
  523. case UNMAP_THREE:
  524. cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
  525. case UNMAP_TWO:
  526. cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
  527. case UNMAP_ONE:
  528. cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
  529. case FREE_IRQ:
  530. cxl_free_afu_irqs(cfg->mcctx);
  531. /* fall through */
  532. case UNDO_NOOP:
  533. /* No action required */
  534. break;
  535. }
  536. }
  537. /**
  538. * term_mc() - terminates the master context
  539. * @cfg: Internal structure associated with the host.
  541. *
  542. * Safe to call with AFU/MC in partially allocated/initialized state.
  543. */
  544. static void term_mc(struct cxlflash_cfg *cfg)
  545. {
  546. int rc = 0;
  547. struct afu *afu = cfg->afu;
  548. struct device *dev = &cfg->dev->dev;
  549. if (!afu || !cfg->mcctx) {
  550. dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
  551. return;
  552. }
  553. rc = cxl_stop_context(cfg->mcctx);
  554. WARN_ON(rc);
  555. cfg->mcctx = NULL;
  556. }
  557. /**
  558. * term_afu() - terminates the AFU
  559. * @cfg: Internal structure associated with the host.
  560. *
  561. * Safe to call with AFU/MC in partially allocated/initialized state.
  562. */
  563. static void term_afu(struct cxlflash_cfg *cfg)
  564. {
  565. struct device *dev = &cfg->dev->dev;
  566. /*
  567. * Tear down is carefully orchestrated to ensure
  568. * no interrupts can come in when the problem state
  569. * area is unmapped.
  570. *
  571. * 1) Disable all AFU interrupts
  572. * 2) Unmap the problem state area
  573. * 3) Stop the master context
  574. */
  575. term_intr(cfg, UNMAP_THREE);
  576. if (cfg->afu)
  577. stop_afu(cfg);
  578. term_mc(cfg);
  579. dev_dbg(dev, "%s: returning\n", __func__);
  580. }
  581. /**
  582. * notify_shutdown() - notifies device of pending shutdown
  583. * @cfg: Internal structure associated with the host.
  584. * @wait: Whether to wait for shutdown processing to complete.
  585. *
  586. * This function will notify the AFU that the adapter is being shutdown
  587. * and will wait for shutdown processing to complete if wait is true.
  588. * This notification should flush pending I/Os to the device and halt
  589. * further I/Os until the next AFU reset is issued and device restarted.
  590. */
  591. static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
  592. {
  593. struct afu *afu = cfg->afu;
  594. struct device *dev = &cfg->dev->dev;
  595. struct sisl_global_map __iomem *global;
  596. struct dev_dependent_vals *ddv;
  597. u64 reg, status;
  598. int i, retry_cnt = 0;
  599. ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
  600. if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
  601. return;
  602. if (!afu || !afu->afu_map) {
  603. dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
  604. return;
  605. }
  606. global = &afu->afu_map->global;
  607. /* Notify AFU */
  608. for (i = 0; i < NUM_FC_PORTS; i++) {
  609. reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
  610. reg |= SISL_FC_SHUTDOWN_NORMAL;
  611. writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
  612. }
  613. if (!wait)
  614. return;
  615. /* Wait up to 1.5 seconds for shutdown processing to complete */
  616. for (i = 0; i < NUM_FC_PORTS; i++) {
  617. retry_cnt = 0;
  618. while (true) {
  619. status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
  620. if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
  621. break;
  622. if (++retry_cnt >= MC_RETRY_CNT) {
  623. dev_dbg(dev, "%s: port %d shutdown processing "
  624. "not yet completed\n", __func__, i);
  625. break;
  626. }
  627. msleep(100 * retry_cnt);
  628. }
  629. }
  630. }
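/*
 * The wait loop above backs off linearly: the n-th retry sleeps n * 100ms,
 * so each port is given roughly 100 * (MC_RETRY_CNT - 1) * MC_RETRY_CNT / 2
 * milliseconds before the driver gives up on seeing
 * SISL_STATUS_SHUTDOWN_COMPLETE. For instance, if MC_RETRY_CNT were 5, that
 * works out to about one second of sleeping per port.
 */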
  631. /**
  632. * cxlflash_remove() - PCI entry point to tear down host
  633. * @pdev: PCI device associated with the host.
  634. *
  635. * Safe to use as a cleanup in partially allocated/initialized state.
  636. */
  637. static void cxlflash_remove(struct pci_dev *pdev)
  638. {
  639. struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
  640. struct device *dev = &pdev->dev;
  641. ulong lock_flags;
  642. if (!pci_is_enabled(pdev)) {
  643. dev_dbg(dev, "%s: Device is disabled\n", __func__);
  644. return;
  645. }
  646. /* If a Task Management Function is active, wait for it to complete
  647. * before continuing with remove.
  648. */
  649. spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
  650. if (cfg->tmf_active)
  651. wait_event_interruptible_lock_irq(cfg->tmf_waitq,
  652. !cfg->tmf_active,
  653. cfg->tmf_slock);
  654. spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
  655. /* Notify AFU and wait for shutdown processing to complete */
  656. notify_shutdown(cfg, true);
  657. cfg->state = STATE_FAILTERM;
  658. cxlflash_stop_term_user_contexts(cfg);
  659. switch (cfg->init_state) {
  660. case INIT_STATE_SCSI:
  661. cxlflash_term_local_luns(cfg);
  662. scsi_remove_host(cfg->host);
  663. /* fall through */
  664. case INIT_STATE_AFU:
  665. term_afu(cfg);
  666. case INIT_STATE_PCI:
  667. pci_disable_device(pdev);
  668. case INIT_STATE_NONE:
  669. free_mem(cfg);
  670. scsi_host_put(cfg->host);
  671. break;
  672. }
  673. dev_dbg(dev, "%s: returning\n", __func__);
  674. }
  675. /**
  676. * alloc_mem() - allocates the AFU and its command pool
  677. * @cfg: Internal structure associated with the host.
  678. *
  679. * A partially allocated state remains on failure.
  680. *
  681. * Return:
  682. * 0 on success
  683. * -ENOMEM on failure to allocate memory
  684. */
  685. static int alloc_mem(struct cxlflash_cfg *cfg)
  686. {
  687. int rc = 0;
  688. struct device *dev = &cfg->dev->dev;
  689. /* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
  690. cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
  691. get_order(sizeof(struct afu)));
  692. if (unlikely(!cfg->afu)) {
  693. dev_err(dev, "%s: cannot get %d free pages\n",
  694. __func__, get_order(sizeof(struct afu)));
  695. rc = -ENOMEM;
  696. goto out;
  697. }
  698. cfg->afu->parent = cfg;
  699. cfg->afu->afu_map = NULL;
  700. out:
  701. return rc;
  702. }
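/*
 * __get_free_pages() rounds the request up to a power-of-two number of
 * pages, so the "~28k" struct afu costs get_order(sizeof(struct afu))
 * pages: order 0 (one page) on a 64k-page PPC64 configuration, but order 3
 * (eight 4k pages, 32k total) when the page size is 4k, slightly more than
 * the seven pages the raw size would suggest. free_mem() frees with the same
 * order, so the pairing stays balanced.
 */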
  703. /**
  704. * init_pci() - initializes the host as a PCI device
  705. * @cfg: Internal structure associated with the host.
  706. *
  707. * Return: 0 on success, -errno on failure
  708. */
  709. static int init_pci(struct cxlflash_cfg *cfg)
  710. {
  711. struct pci_dev *pdev = cfg->dev;
  712. struct device *dev = &cfg->dev->dev;
  713. int rc = 0;
  714. rc = pci_enable_device(pdev);
  715. if (rc || pci_channel_offline(pdev)) {
  716. if (pci_channel_offline(pdev)) {
  717. cxlflash_wait_for_pci_err_recovery(cfg);
  718. rc = pci_enable_device(pdev);
  719. }
  720. if (rc) {
  721. dev_err(dev, "%s: Cannot enable adapter\n", __func__);
  722. cxlflash_wait_for_pci_err_recovery(cfg);
  723. goto out;
  724. }
  725. }
  726. out:
  727. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  728. return rc;
  729. }
  730. /**
  731. * init_scsi() - adds the host to the SCSI stack and kicks off host scan
  732. * @cfg: Internal structure associated with the host.
  733. *
  734. * Return: 0 on success, -errno on failure
  735. */
  736. static int init_scsi(struct cxlflash_cfg *cfg)
  737. {
  738. struct pci_dev *pdev = cfg->dev;
  739. struct device *dev = &cfg->dev->dev;
  740. int rc = 0;
  741. rc = scsi_add_host(cfg->host, &pdev->dev);
  742. if (rc) {
  743. dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
  744. goto out;
  745. }
  746. scsi_scan_host(cfg->host);
  747. out:
  748. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  749. return rc;
  750. }
  751. /**
  752. * set_port_online() - transitions the specified host FC port to online state
  753. * @fc_regs: Top of MMIO region defined for specified port.
  754. *
  755. * The provided MMIO region must be mapped prior to call. Online state means
  756. * that the FC link layer has synced, completed the handshaking process, and
  757. * is ready for login to start.
  758. */
  759. static void set_port_online(__be64 __iomem *fc_regs)
  760. {
  761. u64 cmdcfg;
  762. cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
  763. cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
  764. cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
  765. writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
  766. }
  767. /**
  768. * set_port_offline() - transitions the specified host FC port to offline state
  769. * @fc_regs: Top of MMIO region defined for specified port.
  770. *
  771. * The provided MMIO region must be mapped prior to call.
  772. */
  773. static void set_port_offline(__be64 __iomem *fc_regs)
  774. {
  775. u64 cmdcfg;
  776. cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
  777. cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
  778. cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
  779. writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
  780. }
  781. /**
  782. * wait_port_online() - waits for the specified host FC port to come online
  783. * @fc_regs: Top of MMIO region defined for specified port.
  784. * @delay_us: Number of microseconds to delay between reading port status.
  785. * @nretry: Number of cycles to retry reading port status.
  786. *
  787. * The provided MMIO region must be mapped prior to call. This will timeout
  788. * when the cable is not plugged in.
  789. *
  790. * Return:
  791. * TRUE (1) when the specified port is online
  792. * FALSE (0) when the specified port fails to come online after timeout
  793. */
  794. static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
  795. {
  796. u64 status;
  797. WARN_ON(delay_us < 1000);
  798. do {
  799. msleep(delay_us / 1000);
  800. status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
  801. if (status == U64_MAX)
  802. nretry /= 2;
  803. } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
  804. nretry--);
  805. return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
  806. }
  807. /**
  808. * wait_port_offline() - waits for the specified host FC port to go offline
  809. * @fc_regs: Top of MMIO region defined for specified port.
  810. * @delay_us: Number of microseconds to delay between reading port status.
  811. * @nretry: Number of cycles to retry reading port status.
  812. *
  813. * The provided MMIO region must be mapped prior to call.
  814. *
  815. * Return:
  816. * TRUE (1) when the specified port is offline
  817. * FALSE (0) when the specified port fails to go offline after timeout
  818. */
  819. static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
  820. {
  821. u64 status;
  822. WARN_ON(delay_us < 1000);
  823. do {
  824. msleep(delay_us / 1000);
  825. status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
  826. if (status == U64_MAX)
  827. nretry /= 2;
  828. } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
  829. nretry--);
  830. return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
  831. }
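/*
 * Both port-wait helpers poll FC_MTIP_STATUS every delay_us microseconds for
 * at most nretry iterations, so the worst-case wait is roughly
 * nretry * delay_us. A status of U64_MAX means the MMIO read itself failed
 * (an errored or unplugged PCI/CXL link reads back as all ones), so the
 * remaining retry budget is halved each time to bail out early instead of
 * sleeping through the full window on a dead link.
 */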
  832. /**
  833. * afu_set_wwpn() - configures the WWPN for the specified host FC port
  834. * @afu: AFU associated with the host that owns the specified FC port.
  835. * @port: Port number being configured.
  836. * @fc_regs: Top of MMIO region defined for specified port.
  837. * @wwpn: The world-wide-port-number previously discovered for port.
  838. *
  839. * The provided MMIO region must be mapped prior to call. As part of the
  840. * sequence to configure the WWPN, the port is toggled offline and then back
  841. * online. This toggling action can cause this routine to delay up to a few
  842. * seconds. When configured to use the internal LUN feature of the AFU, a
  843. * failure to come online is overridden.
  844. */
  845. static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
  846. u64 wwpn)
  847. {
  848. struct cxlflash_cfg *cfg = afu->parent;
  849. struct device *dev = &cfg->dev->dev;
  850. set_port_offline(fc_regs);
  851. if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
  852. FC_PORT_STATUS_RETRY_CNT)) {
  853. dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
  854. __func__, port);
  855. }
  856. writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
  857. set_port_online(fc_regs);
  858. if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
  859. FC_PORT_STATUS_RETRY_CNT)) {
  860. dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
  861. __func__, port);
  862. }
  863. }
  864. /**
  865. * afu_link_reset() - resets the specified host FC port
  866. * @afu: AFU associated with the host that owns the specified FC port.
  867. * @port: Port number being configured.
  868. * @fc_regs: Top of MMIO region defined for specified port.
  869. *
  870. * The provided MMIO region must be mapped prior to call. The sequence to
  871. * reset the port involves toggling it offline and then back online. This
  872. * action can cause this routine to delay up to a few seconds. An effort
  873. * is made to maintain link with the device by switching to host to use
  874. * the alternate port exclusively while the reset takes place.
  875. * failure to come online is overridden.
  876. */
  877. static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
  878. {
  879. struct cxlflash_cfg *cfg = afu->parent;
  880. struct device *dev = &cfg->dev->dev;
  881. u64 port_sel;
  882. /* first switch the AFU to the other links, if any */
  883. port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
  884. port_sel &= ~(1ULL << port);
  885. writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
  886. cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
  887. set_port_offline(fc_regs);
  888. if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
  889. FC_PORT_STATUS_RETRY_CNT))
  890. dev_err(dev, "%s: wait on port %d to go offline timed out\n",
  891. __func__, port);
  892. set_port_online(fc_regs);
  893. if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
  894. FC_PORT_STATUS_RETRY_CNT))
  895. dev_err(dev, "%s: wait on port %d to go online timed out\n",
  896. __func__, port);
  897. /* switch back to include this port */
  898. port_sel |= (1ULL << port);
  899. writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
  900. cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
  901. dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
  902. }
  903. /*
  904. * Asynchronous interrupt information table
  905. */
  906. static const struct asyc_intr_info ainfo[] = {
  907. {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
  908. {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
  909. {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
  910. {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
  911. {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
  912. {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
  913. {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
  914. {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
  915. {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
  916. {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
  917. {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
  918. {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
  919. {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
  920. {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
  921. {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
  922. {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
  923. {0x0, "", 0, 0} /* terminator */
  924. };
  925. /**
  926. * find_ainfo() - locates and returns asynchronous interrupt information
  927. * @status: Status code set by AFU on error.
  928. *
  929. * Return: The located information or NULL when the status code is invalid.
  930. */
  931. static const struct asyc_intr_info *find_ainfo(u64 status)
  932. {
  933. const struct asyc_intr_info *info;
  934. for (info = &ainfo[0]; info->status; info++)
  935. if (info->status == status)
  936. return info;
  937. return NULL;
  938. }
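/*
 * find_ainfo() matches on the full status value, so callers must probe one
 * bit at a time. cxlflash_async_err_irq() below does exactly that, shifting
 * the unmasked status right each iteration and looking up (1ULL << i). For
 * example, a raw status with both SISL_ASTATUS_FC0_LINK_UP and
 * SISL_ASTATUS_FC1_LOGI_S set results in two independent lookups, each
 * returning its own port number and action mask (0 and SCAN_HOST here,
 * respectively).
 */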
  939. /**
  940. * afu_err_intr_init() - clears and initializes the AFU for error interrupts
  941. * @afu: AFU associated with the host.
  942. */
  943. static void afu_err_intr_init(struct afu *afu)
  944. {
  945. int i;
  946. u64 reg;
  947. /* Global async interrupts: the AFU clears afu_ctrl on context exit
  948. * if async interrupts were sent to that context. This prevents
  949. * the AFU from sending further async interrupts when there is
  950. * nobody to receive them.
  951. */
  953. /* mask all */
  954. writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
  955. /* set LISN# to send and point to master context */
  956. reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
  957. if (afu->internal_lun)
  958. reg |= 1; /* Bit 63 indicates local lun */
  959. writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
  960. /* clear all */
  961. writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
  962. /* unmask bits that are of interest */
  963. /* note: afu can send an interrupt after this step */
  964. writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
  965. /* clear again in case a bit came on after previous clear but before */
  966. /* unmask */
  967. writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
  968. /* Clear/Set internal lun bits */
  969. reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
  970. reg &= SISL_FC_INTERNAL_MASK;
  971. if (afu->internal_lun)
  972. reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
  973. writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
  974. /* now clear FC errors */
  975. for (i = 0; i < NUM_FC_PORTS; i++) {
  976. writeq_be(0xFFFFFFFFU,
  977. &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
  978. writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
  979. }
  980. /* sync interrupts for master's IOARRIN write */
  981. /* note that unlike asyncs, there can be no pending sync interrupts */
  982. /* at this time (this is a fresh context and master has not written */
  983. /* IOARRIN yet), so there is nothing to clear. */
  984. /* set LISN#, it is always sent to the context that wrote IOARRIN */
  985. writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
  986. writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
  987. }
  988. /**
  989. * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
  990. * @irq: Interrupt number.
  991. * @data: Private data provided at interrupt registration, the AFU.
  992. *
  993. * Return: Always return IRQ_HANDLED.
  994. */
  995. static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
  996. {
  997. struct afu *afu = (struct afu *)data;
  998. struct cxlflash_cfg *cfg = afu->parent;
  999. struct device *dev = &cfg->dev->dev;
  1000. u64 reg;
  1001. u64 reg_unmasked;
  1002. reg = readq_be(&afu->host_map->intr_status);
  1003. reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
  1004. if (reg_unmasked == 0UL) {
  1005. dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
  1006. __func__, reg);
  1007. goto cxlflash_sync_err_irq_exit;
  1008. }
  1009. dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
  1010. __func__, reg);
  1011. writeq_be(reg_unmasked, &afu->host_map->intr_clear);
  1012. cxlflash_sync_err_irq_exit:
  1013. return IRQ_HANDLED;
  1014. }
  1015. /**
  1016. * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
  1017. * @irq: Interrupt number.
  1018. * @data: Private data provided at interrupt registration, the AFU.
  1019. *
  1020. * Return: Always return IRQ_HANDLED.
  1021. */
  1022. static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
  1023. {
  1024. struct afu *afu = (struct afu *)data;
  1025. struct afu_cmd *cmd;
  1026. struct sisl_ioasa *ioasa;
  1027. struct sisl_ioarcb *ioarcb;
  1028. bool toggle = afu->toggle;
  1029. u64 entry,
  1030. *hrrq_start = afu->hrrq_start,
  1031. *hrrq_end = afu->hrrq_end,
  1032. *hrrq_curr = afu->hrrq_curr;
  1033. /* Process however many RRQ entries that are ready */
  1034. while (true) {
  1035. entry = *hrrq_curr;
  1036. if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
  1037. break;
  1038. entry &= ~SISL_RESP_HANDLE_T_BIT;
  1039. if (afu_is_sq_cmd_mode(afu)) {
  1040. ioasa = (struct sisl_ioasa *)entry;
  1041. cmd = container_of(ioasa, struct afu_cmd, sa);
  1042. } else {
  1043. ioarcb = (struct sisl_ioarcb *)entry;
  1044. cmd = container_of(ioarcb, struct afu_cmd, rcb);
  1045. }
  1046. cmd_complete(cmd);
  1047. /* Advance to next entry or wrap and flip the toggle bit */
  1048. if (hrrq_curr < hrrq_end)
  1049. hrrq_curr++;
  1050. else {
  1051. hrrq_curr = hrrq_start;
  1052. toggle ^= SISL_RESP_HANDLE_T_BIT;
  1053. }
  1054. atomic_inc(&afu->hsq_credits);
  1055. }
  1056. afu->hrrq_curr = hrrq_curr;
  1057. afu->toggle = toggle;
  1058. return IRQ_HANDLED;
  1059. }
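/*
 * The RRQ is consumed with a toggle-bit protocol instead of a shared
 * head/tail pair: the AFU writes each entry with SISL_RESP_HANDLE_T_BIT set
 * to the current generation, and the host flips its expected toggle every
 * time it wraps from hrrq_end back to hrrq_start. An entry whose toggle does
 * not match is simply left over from the previous lap, so the handler stops
 * there. start_afu() seeds afu->toggle to 1 to match the first generation
 * the AFU writes after reset.
 */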
  1060. /**
  1061. * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
  1062. * @irq: Interrupt number.
  1063. * @data: Private data provided at interrupt registration, the AFU.
  1064. *
  1065. * Return: Always return IRQ_HANDLED.
  1066. */
  1067. static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
  1068. {
  1069. struct afu *afu = (struct afu *)data;
  1070. struct cxlflash_cfg *cfg = afu->parent;
  1071. struct device *dev = &cfg->dev->dev;
  1072. u64 reg_unmasked;
  1073. const struct asyc_intr_info *info;
  1074. struct sisl_global_map __iomem *global = &afu->afu_map->global;
  1075. u64 reg;
  1076. u8 port;
  1077. int i;
  1078. reg = readq_be(&global->regs.aintr_status);
  1079. reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
  1080. if (reg_unmasked == 0) {
  1081. dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
  1082. __func__, reg);
  1083. goto out;
  1084. }
  1085. /* FYI, it is 'okay' to clear AFU status before FC_ERROR */
  1086. writeq_be(reg_unmasked, &global->regs.aintr_clear);
  1087. /* Check each bit that is on */
  1088. for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
  1089. info = find_ainfo(1ULL << i);
  1090. if (((reg_unmasked & 0x1) == 0) || !info)
  1091. continue;
  1092. port = info->port;
  1093. dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
  1094. __func__, port, info->desc,
  1095. readq_be(&global->fc_regs[port][FC_STATUS / 8]));
  1096. /*
  1097. * Do link reset first, some OTHER errors will set FC_ERROR
  1098. * again if cleared before or w/o a reset
  1099. */
  1100. if (info->action & LINK_RESET) {
  1101. dev_err(dev, "%s: FC Port %d: resetting link\n",
  1102. __func__, port);
  1103. cfg->lr_state = LINK_RESET_REQUIRED;
  1104. cfg->lr_port = port;
  1105. schedule_work(&cfg->work_q);
  1106. }
  1107. if (info->action & CLR_FC_ERROR) {
  1108. reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
  1109. /*
  1110. * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
  1111. * should be the same and tracing one is sufficient.
  1112. */
  1113. dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
  1114. __func__, port, reg);
  1115. writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
  1116. writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
  1117. }
  1118. if (info->action & SCAN_HOST) {
  1119. atomic_inc(&cfg->scan_host_needed);
  1120. schedule_work(&cfg->work_q);
  1121. }
  1122. }
  1123. out:
  1124. return IRQ_HANDLED;
  1125. }
  1126. /**
  1127. * start_context() - starts the master context
  1128. * @cfg: Internal structure associated with the host.
  1129. *
  1130. * Return: A success or failure value from CXL services.
  1131. */
  1132. static int start_context(struct cxlflash_cfg *cfg)
  1133. {
  1134. struct device *dev = &cfg->dev->dev;
  1135. int rc = 0;
  1136. rc = cxl_start_context(cfg->mcctx,
  1137. cfg->afu->work.work_element_descriptor,
  1138. NULL);
  1139. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  1140. return rc;
  1141. }
  1142. /**
  1143. * read_vpd() - obtains the WWPNs from VPD
  1144. * @cfg: Internal structure associated with the host.
  1145. * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs
  1146. *
  1147. * Return: 0 on success, -errno on failure
  1148. */
  1149. static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
  1150. {
  1151. struct device *dev = &cfg->dev->dev;
  1152. struct pci_dev *pdev = cfg->dev;
  1153. int rc = 0;
  1154. int ro_start, ro_size, i, j, k;
  1155. ssize_t vpd_size;
  1156. char vpd_data[CXLFLASH_VPD_LEN];
  1157. char tmp_buf[WWPN_BUF_LEN] = { 0 };
  1158. char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
  1159. /* Get the VPD data from the device */
  1160. vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
  1161. if (unlikely(vpd_size <= 0)) {
  1162. dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
  1163. __func__, vpd_size);
  1164. rc = -ENODEV;
  1165. goto out;
  1166. }
  1167. /* Get the read only section offset */
  1168. ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
  1169. PCI_VPD_LRDT_RO_DATA);
  1170. if (unlikely(ro_start < 0)) {
  1171. dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
  1172. rc = -ENODEV;
  1173. goto out;
  1174. }
  1175. /* Get the read only section size, cap when extends beyond read VPD */
  1176. ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
  1177. j = ro_size;
  1178. i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
  1179. if (unlikely((i + j) > vpd_size)) {
  1180. dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
  1181. __func__, (i + j), vpd_size);
  1182. ro_size = vpd_size - i;
  1183. }
  1184. /*
  1185. * Find the offset of the WWPN tag within the read only
  1186. * VPD data and validate the found field (partials are
  1187. * no good to us). Convert the ASCII data to an integer
  1188. * value. Note that we must copy to a temporary buffer
  1189. * because the conversion service requires that the ASCII
  1190. * string be terminated.
  1191. */
  1192. for (k = 0; k < NUM_FC_PORTS; k++) {
  1193. j = ro_size;
  1194. i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
  1195. i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
  1196. if (unlikely(i < 0)) {
  1197. dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
  1198. __func__, k);
  1199. rc = -ENODEV;
  1200. goto out;
  1201. }
  1202. j = pci_vpd_info_field_size(&vpd_data[i]);
  1203. i += PCI_VPD_INFO_FLD_HDR_SIZE;
  1204. if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
  1205. dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
  1206. __func__, k);
  1207. rc = -ENODEV;
  1208. goto out;
  1209. }
  1210. memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
  1211. rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
  1212. if (unlikely(rc)) {
  1213. dev_err(dev, "%s: WWPN conversion failed for port %d\n",
  1214. __func__, k);
  1215. rc = -ENODEV;
  1216. goto out;
  1217. }
  1218. }
  1219. out:
  1220. dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
  1221. return rc;
  1222. }
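/*
 * One subtlety in the conversion above: the second argument to kstrtoul()
 * is the numeric base, and passing WWPN_LEN as the base relies on that
 * constant being 16 (a WWPN is eight bytes, i.e. sixteen hex characters),
 * which matches the hexadecimal ASCII encoding found in the VPD. In other
 * words the call behaves like, e.g.,
 *
 *	kstrtoul("0123456789abcdef", 16, (ulong *)&wwpn[k]);
 *
 * where the sixteen-character string was copied out of the VPD keyword
 * field and NUL-terminated in tmp_buf.
 */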
  1223. /**
  1224. * init_pcr() - initialize the provisioning and control registers
  1225. * @cfg: Internal structure associated with the host.
  1226. *
  1227. * Also sets up fast access to the mapped registers and initializes AFU
  1228. * command fields that never change.
  1229. */
  1230. static void init_pcr(struct cxlflash_cfg *cfg)
  1231. {
  1232. struct afu *afu = cfg->afu;
  1233. struct sisl_ctrl_map __iomem *ctrl_map;
  1234. int i;
  1235. for (i = 0; i < MAX_CONTEXT; i++) {
  1236. ctrl_map = &afu->afu_map->ctrls[i].ctrl;
  1237. /* Disrupt any clients that could be running */
  1238. /* e.g. clients that survived a master restart */
  1239. writeq_be(0, &ctrl_map->rht_start);
  1240. writeq_be(0, &ctrl_map->rht_cnt_id);
  1241. writeq_be(0, &ctrl_map->ctx_cap);
  1242. }
  1243. /* Copy frequently used fields into afu */
  1244. afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
  1245. afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
  1246. afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
  1247. /* Program the Endian Control for the master context */
  1248. writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
  1249. }
  1250. /**
  1251. * init_global() - initialize AFU global registers
  1252. * @cfg: Internal structure associated with the host.
  1253. */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n",
		__func__, wwpn[0], wwpn[1]);

	/* Set up RRQ and SQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	if (afu_is_sq_cmd_mode(afu)) {
		writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start);
		writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end);
	}

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
	/*
	 * Enable all auto retry options and control endianness.
	 * Leave others at default:
	 * CTX_CAP write protected, mbox_r does not clear on read and
	 * checker on if dual AFU.
	 */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i,
				     &afu->afu_map->global.fc_regs[i][0],
				     wwpn[i]);
		/*
		 * Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/*
	 * Set up master's own CTX_CAP to allow real mode, host translation
	 * tables, afu cmds and read/write GSCSI cmds.
	 * First, unlock ctx_cap write by reading mbox.
	 */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);

	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
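	/*
	 * The toggle value below tracks which pass through the RRQ ring is
	 * current: new completions carry a toggle bit matching this value,
	 * letting stale entries from the previous pass be ignored. It starts
	 * at 1 and is expected to flip each time the queue wraps.
	 */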
	afu->toggle = 1;

	/* Initialize SQ */
	if (afu_is_sq_cmd_mode(afu)) {
		memset(&afu->sq, 0, sizeof(afu->sq));
		afu->hsq_start = &afu->sq[0];
		afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1];
		afu->hsq_curr = afu->hsq_start;

		spin_lock_init(&afu->hsq_slock);
		atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1);
	}

	rc = init_global(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg: Internal structure associated with the host.
 * @ctx: Context associated with the host.
 *
 * Return: undo_level depicting level at which to back out on failure
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct cxl_context *ctx)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}

/**
 * init_mc() - create and register as the master context
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto ret;
	}
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc);
		goto ret;
	}

	level = init_intr(cfg, ctx);
	if (unlikely(level)) {
		dev_err(dev, "%s: interrupt init failed level=%d\n",
			__func__, level);
		rc = -ENODEV;
		goto out;
	}

	/*
	 * This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx).
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_intr(cfg, level);
	goto ret;
}

/**
 * init_afu() - setup as master context and start AFU
 * @cfg: Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: init_mc failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
		readq_be(&afu->afu_map->global.regs.interface_version);
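	/*
	 * A value of all ones (the only value whose increment wraps to zero
	 * in the check below) is treated as a back-level AFU image that this
	 * driver cannot drive.
	 */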
	if ((afu->interface_version + 1) == 0) {
		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
			"interface version %016llx\n", afu->version,
			afu->interface_version);
		rc = -EINVAL;
		goto err1;
	}
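
	/*
	 * Resolve the command submission and context reset handlers once,
	 * based on the command mode advertised by the AFU, so the I/O paths
	 * can simply call through afu->send_cmd and afu->context_reset.
	 */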
	if (afu_is_sq_cmd_mode(afu)) {
		afu->send_cmd = send_cmd_sq;
		afu->context_reset = context_reset_sq;
	} else {
		afu->send_cmd = send_cmd_ioarrin;
		afu->context_reset = context_reset_ioarrin;
	}

	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
		afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
		goto err1;
	}

	afu_err_intr_init(cfg->afu);
	spin_lock_init(&afu->rrin_slock);
	afu->room = readq_be(&afu->host_map->cmd_room);

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err1:
	term_intr(cfg, UNMAP_THREE);
	term_mc(cfg);
	goto out;
}

/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu: AFU associated with the host.
 * @ctx_hndl_u: Identifies context requesting sync.
 * @res_hndl_u: Identifies resource requesting sync.
 * @mode: Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads not to be running in
 * interrupt context, since they may sleep while waiting on a concurrent sync
 * operation.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 * 0 on success
 * -1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	char *buf = NULL;
	int rc = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Sync not required state=%u\n",
			__func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
	atomic_inc(&afu->cmds_active);
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -1;
		goto out;
	}
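
	/*
	 * The allocation above includes (alignment - 1) bytes of slack so
	 * that PTR_ALIGN() can carve a naturally aligned struct afu_cmd out
	 * of a plain kzalloc'd buffer.
	 */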
	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	init_completion(&cmd->cevent);
	cmd->parent = afu;

	dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	rc = wait_resp(afu, cmd);
	if (unlikely(rc))
		rc = -1;
out:
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}
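
/*
 * Illustrative use of cxlflash_afu_sync() (the mode constant is assumed to
 * come from the driver headers): a caller tearing down a resource handle
 * might issue
 *
 *	cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
 *
 * to have the AFU flush state associated with that context/resource pair.
 */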

/**
 * afu_reset() - resets the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	/*
	 * Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset completes.
	 */
	term_afu(cfg);

	rc = init_afu(cfg);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg: Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
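	/*
	 * Every ioctl holds ioctl_rwsem for read while it runs, so taking it
	 * for write blocks until all in-flight ioctls have completed; the
	 * write lock is then released immediately since only the draining
	 * effect is wanted.
	 */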
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}

/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp: SCSI command to send.
 *
 * Return:
 * SUCCESS as defined in scsi/scsi.h
 * FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp: SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 * SUCCESS as defined in scsi/scsi.h
 * FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
		scp->device->channel, scp->device->id, scp->device->lun,
		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		/* fall through */
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev: SCSI device destined for queue depth change.
 * @qdepth: Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_regs;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}

/**
 * port0_show() - queries and presents the current status of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(1, afu, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count: Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf (count).
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = shost_priv(shost);
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be 2 (default).
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = NUM_FC_PORTS - 1;

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}
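
/*
 * Example usage of the lun_mode attribute (the host number is illustrative
 * and varies per system):
 *
 *	echo 1 > /sys/class/scsi_host/host0/lun_mode
 *
 * switches the AFU to a single internal 512B-block LUN, then resets the AFU
 * and rescans the host so the new LUN appears.
 */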

/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the ioctl version.
 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct afu *afu,
					    char *buf)
{
	int i;
	ssize_t bytes = 0;
	__be64 __iomem *fc_port;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_port = &afu->afu_map->global.fc_port[port][0];

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n", i, readq_be(&fc_port[i]));
	return bytes;
}

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(0, afu, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(1, afu, buf);
}

/**
 * mode_show() - presents the current mode of the device
 * @dev: Generic device associated with the device.
 * @attr: Device attribute representing the device mode.
 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};

/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };
static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);

/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work: Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed on interrupt context due to
 *   blocking up to a few seconds
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			afu_link_reset(afu, port,
				       &afu->afu_map->global.fc_regs[port][0]);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
}

/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev: PCI device associated with the host.
 * @dev_id: PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *dev = &pdev->dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = NUM_FC_PORTS - 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = shost_priv(host);
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(dev, "%s: alloc_mem failed\n", __func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
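	/*
	 * init_state records how far probe has progressed (PCI, AFU, SCSI)
	 * so that cxlflash_remove() can unwind only the stages that were
	 * actually completed.
	 */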
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * The promoted LUNs move to the top of the LUN table. The rest stay
	 * on the bottom half. The bottom half grows from the end
	 * (index = 255), whereas the top half grows from the beginning
	 * (index = 0).
	 */
	cfg->promote_lun_index = 0;
	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}

/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev: PCI device struct.
 * @state: PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev: PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev: PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}

static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};

/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};

/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);