native.c

/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"
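
/*
 * Write an AFU control command into AFU_Cntl_An and poll until the masked
 * status matches the expected result. Returns -EBUSY on timeout and -EIO if
 * the link goes down; afu->enabled is updated to the requested state once the
 * command completes (or the link is lost and the state can no longer be read).
 */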
static int afu_control(struct cxl_afu *afu, u64 command,
                       u64 result, u64 mask, bool enabled)
{
        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        spin_lock(&afu->afu_cntl_lock);
        pr_devel("AFU command starting: %llx\n", command);

        trace_cxl_afu_ctrl(afu, command);

        cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);

        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        while ((AFU_Cntl & mask) != result) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }

                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        afu->enabled = enabled;
                        rc = -EIO;
                        goto out;
                }

                pr_devel_ratelimited("AFU control... (0x%016llx)\n",
                                     AFU_Cntl | command);
                cpu_relax();
                AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        }

        pr_devel("AFU command complete: %llx\n", command);
        afu->enabled = enabled;
out:
        trace_cxl_afu_ctrl_done(afu, command, rc);
        spin_unlock(&afu->afu_cntl_lock);

        return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
        pr_devel("AFU enable request\n");

        return afu_control(afu, CXL_AFU_Cntl_An_E,
                           CXL_AFU_Cntl_An_ES_Enabled,
                           CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
        pr_devel("AFU disable request\n");

        return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
                           CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
        pr_devel("AFU reset request\n");

        return afu_control(afu, CXL_AFU_Cntl_An_RA,
                           CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
                           CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
                           false);
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Refusing to enable afu while link down!\n");
                return -EIO;
        }
        if (afu->enabled)
                return 0;
        return afu_enable(afu);
}

int cxl_psl_purge(struct cxl_afu *afu)
{
        u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
        u64 dsisr, dar;
        u64 start, end;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

        pr_devel("PSL purge request\n");

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
                rc = -EIO;
                goto out;
        }

        if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
                WARN(1, "psl_purge request while AFU not disabled!\n");
                cxl_afu_disable(afu);
        }

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
        start = local_clock();
        PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
               == CXL_PSL_SCNTL_An_Ps_Pending) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(afu->adapter, afu)) {
                        rc = -EIO;
                        goto out;
                }

                dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
                pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
                if (dsisr & CXL_PSL_DSISR_TRANS) {
                        dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
                        dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
                } else if (dsisr) {
                        dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
                        cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
                } else {
                        cpu_relax();
                }
                PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
        }
        end = local_clock();
        pr_devel("PSL purged in %lld ns\n", end - start);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
                      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
        trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
        return rc;
}

static int spa_max_procs(int spa_size)
{
        /*
         * From the CAIA:
         *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
         * Most of that junk is really just an overly-complicated way of saying
         * the last 256 bytes are __aligned(128), so it's really:
         *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
         * and
         *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
         * so
         *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
         * Ignore the alignment (which is safe in this case as long as we are
         * careful with our rounding) and solve for n:
         */
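        /*
         * Working that algebra out (still ignoring the alignment):
         *    sizeof(SPA) = (n + 4) * 128 + n * 8 + 256
         *                = 136n + 768
         * so for a given spa_size:
         *    n = (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17
         * which is the expression returned below.
         */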
        return ((spa_size / 8) - 96) / 17;
}

int cxl_alloc_spa(struct cxl_afu *afu)
{
        unsigned spa_size;

        /* Work out how many pages to allocate */
        afu->native->spa_order = 0;
        do {
                afu->native->spa_order++;
                spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

                if (spa_size > 0x100000) {
                        dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
                                 afu->native->spa_max_procs, afu->native->spa_size);
                        afu->num_procs = afu->native->spa_max_procs;
                        break;
                }

                afu->native->spa_size = spa_size;
                afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
        } while (afu->native->spa_max_procs < afu->num_procs);

        if (!(afu->native->spa = (struct cxl_process_element *)
              __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
                pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
                return -ENOMEM;
        }
        pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
                 1 << afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

        return 0;
}
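
/*
 * Point the PSL at the scheduled process area: the software command/status
 * doubleword sits (spa_max_procs + 3) * 128 bytes into the SPA, and the SPA's
 * physical base address, an encoded size and the valid bit are packed into
 * the CXL_PSL_SPAP_An register.
 */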
static void attach_spa(struct cxl_afu *afu)
{
        u64 spap;

        afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
                ((afu->native->spa_max_procs + 3) * 128));

        spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
        spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
        spap |= CXL_PSL_SPAP_V;
        pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
                 afu->native->spa, afu->native->spa_max_procs,
                 afu->native->sw_command_status, spap);
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

static inline void detach_spa(struct cxl_afu *afu)
{
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
        if (afu->native->spa) {
                free_pages((unsigned long) afu->native->spa,
                           afu->native->spa_order);
                afu->native->spa = NULL;
        }
}

int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

        pr_devel("CXL adapter wide TLBIA & SLBIA\n");

        cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

        cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }

        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
        while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
                        return -EBUSY;
                }
                if (!cxl_ops->link_ok(adapter, NULL))
                        return -EIO;
                cpu_relax();
        }
        return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
        int rc;

        /* 1. Disable SSTP by writing 0 to SSTP1[V] */
        cxl_p2n_write(afu, CXL_SSTP1_An, 0);

        /* 2. Invalidate all SLB entries */
        if ((rc = cxl_afu_slbia(afu)))
                return rc;

        /* 3. Set SSTP0_An */
        cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

        /* 4. Set SSTP1_An */
        cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

        return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
        struct cxl *adapter = ctx->afu->adapter;
        u64 slbia;

        WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

        cxl_p1_write(adapter, CXL_PSL_LBISEL,
                     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
                     be32_to_cpu(ctx->elem->lpid));
        cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

        while (1) {
                if (!cxl_ops->link_ok(adapter, NULL))
                        break;
                slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
                if (!(slbia & CXL_TLB_SLB_P))
                        break;
                cpu_relax();
        }
}
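
/*
 * Issue a software command against a process element via the SPA protocol:
 * write the desired software state into the PE, stage the command ORed with
 * the PE handle in sw_command_status, kick the PSL through CXL_PSL_LLCMD_An,
 * then poll sw_command_status until the PSL reports the command complete for
 * this PE (or the link drops, or we time out).
 */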
static int do_process_element_cmd(struct cxl_context *ctx,
                                  u64 cmd, u64 pe_state)
{
        u64 state;
        unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
        int rc = 0;

        trace_cxl_llcmd(ctx, cmd);

        WARN_ON(!ctx->afu->enabled);

        ctx->elem->software_state = cpu_to_be32(pe_state);
        smp_wmb();
        *(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
        smp_mb();
        cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
        while (1) {
                if (time_after_eq(jiffies, timeout)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
                        rc = -EBUSY;
                        goto out;
                }
                if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                        dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
                        rc = -EIO;
                        goto out;
                }
                state = be64_to_cpup(ctx->afu->native->sw_command_status);
                if (state == ~0ULL) {
                        pr_err("cxl: Error adding process element to AFU\n");
                        rc = -1;
                        goto out;
                }
                if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
                    (cmd | (cmd >> 16) | ctx->pe))
                        break;
                /*
                 * The command won't finish in the PSL if there are
                 * outstanding DSIs. Hence we need to yield here in
                 * case there are outstanding DSIs that we need to
                 * service. Tuning possibility: we could wait for a
                 * while before scheduling.
                 */
                schedule();
        }
out:
        trace_cxl_llcmd_done(ctx, cmd, rc);
        return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
        if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
                ctx->pe_inserted = true;
        pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        /* fast path terminate if it's already invalid */
        if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
                return rc;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
        /* We could be asked to terminate when the hw is down. That
         * should always succeed: it's not running if the hw has gone
         * away and is being reset.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
                                            CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
        ctx->elem->software_state = 0;  /* Remove Valid bit */
        pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);
        return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
        int rc = 0;

        mutex_lock(&ctx->afu->native->spa_mutex);
        pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

        /* We could be asked to remove when the hw is down. Again, if
         * the hw is down, the PE is gone, so we succeed.
         */
        if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
                rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

        if (!rc)
                ctx->pe_inserted = false;
        slb_invalid(ctx);
        pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
        mutex_unlock(&ctx->afu->native->spa_mutex);

        return rc;
}
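
/*
 * Map the context's problem state area: master contexts (and AFUs with no
 * per-process problem state area) get the AFU-wide space, everything else
 * gets its own pp_size slice at pp_offset + pp_size * pe.
 */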
void cxl_assign_psn_space(struct cxl_context *ctx)
{
        if (!ctx->afu->pp_size || ctx->master) {
                ctx->psn_phys = ctx->afu->psn_phys;
                ctx->psn_size = ctx->afu->adapter->ps_size;
        } else {
                ctx->psn_phys = ctx->afu->psn_phys +
                        (ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
                ctx->psn_size = ctx->afu->pp_size;
        }
}

static int activate_afu_directed(struct cxl_afu *afu)
{
        int rc;

        dev_info(&afu->dev, "Activating AFU directed mode\n");

        afu->num_procs = afu->max_procs_virtualised;
        if (afu->native->spa == NULL) {
                if (cxl_alloc_spa(afu))
                        return -ENOMEM;
        }
        attach_spa(afu);

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
        cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

        afu->current_mode = CXL_MODE_DIRECTED;

        if ((rc = cxl_chardev_m_afu_add(afu)))
                return rc;

        if ((rc = cxl_sysfs_afu_m_add(afu)))
                goto err;

        if ((rc = cxl_chardev_s_afu_add(afu)))
                goto err1;

        return 0;
err1:
        cxl_sysfs_afu_m_remove(afu);
err:
        cxl_chardev_afu_remove(afu);
        return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif
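
/*
 * Build the State Register image for a context: endianness, the master
 * process bit, tags-active (mirroring LPCR[TC]), and the relocation,
 * privilege and addressing-mode bits appropriate to a kernel or user context.
 */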
static u64 calculate_sr(struct cxl_context *ctx)
{
        u64 sr = 0;

        set_endian(sr);
        if (ctx->master)
                sr |= CXL_PSL_SR_An_MP;
        if (mfspr(SPRN_LPCR) & LPCR_TC)
                sr |= CXL_PSL_SR_An_TC;
        if (ctx->kernel) {
                if (!ctx->real_mode)
                        sr |= CXL_PSL_SR_An_R;
                sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
        } else {
                sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
                sr &= ~(CXL_PSL_SR_An_HV);
                if (!test_tsk_thread_flag(current, TIF_32BIT))
                        sr |= CXL_PSL_SR_An_SF;
        }
        return sr;
}
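
/*
 * Fill in this context's process element (PID, LPID, state register, segment
 * table pointers, interrupt vectors, AMR and WED), make sure the AFU is
 * enabled, and then ask the PSL to add the element to the SPA.
 */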
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
        u32 pid;
        int r, result;

        cxl_assign_psn_space(ctx);

        ctx->elem->ctxtime = 0; /* disable */
        ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
        ctx->elem->haurp = 0; /* disable */
        ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

        pid = current->pid;
        if (ctx->kernel)
                pid = 0;
        ctx->elem->common.tid = 0;
        ctx->elem->common.pid = cpu_to_be32(pid);

        ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

        ctx->elem->common.csrp = 0; /* disable */
        ctx->elem->common.aurp0 = 0; /* disable */
        ctx->elem->common.aurp1 = 0; /* disable */

        cxl_prefault(ctx, wed);

        ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
        ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

        /*
         * Ensure we have the multiplexed PSL interrupt set up to take faults
         * for kernel contexts that may not have allocated any AFU IRQs at all:
         */
        if (ctx->irqs.range[0] == 0) {
                ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
                ctx->irqs.range[0] = 1;
        }
        for (r = 0; r < CXL_IRQ_RANGES; r++) {
                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
        }

        ctx->elem->common.amr = cpu_to_be64(amr);
        ctx->elem->common.wed = cpu_to_be64(wed);

        /* first guy needs to enable */
        if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
                return result;

        return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating AFU directed mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_sysfs_afu_m_remove(afu);
        cxl_chardev_afu_remove(afu);

        cxl_ops->afu_reset(afu);
        cxl_afu_disable(afu);
        cxl_psl_purge(afu);

        return 0;
}

static int activate_dedicated_process(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Activating dedicated process mode\n");

        cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

        cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
        cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
        cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
        cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
        cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
        cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

        cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
        cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
        cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

        afu->current_mode = CXL_MODE_DEDICATED;
        afu->num_procs = 1;

        return cxl_chardev_d_afu_add(afu);
}
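
/*
 * In dedicated process mode the per-context state goes straight into the
 * slice registers rather than a process element: PID/TID, the state register,
 * the segment table pointers, and the four IVTE offset/range pairs packed as
 * 16-bit fields into the IVTE Offset and Limit registers.
 */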
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
        struct cxl_afu *afu = ctx->afu;
        u64 pid;
        int rc;

        pid = (u64)current->pid << 32;
        if (ctx->kernel)
                pid = 0;
        cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

        cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

        if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
                return rc;

        cxl_prefault(ctx, wed);

        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
                      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
                      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
                      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
                      ((u64)ctx->irqs.offset[3] & 0xffff));
        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
                      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
                      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
                      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
                      ((u64)ctx->irqs.range[3] & 0xffff));

        cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

        /* master only context for dedicated */
        cxl_assign_psn_space(ctx);

        if ((rc = cxl_ops->afu_reset(afu)))
                return rc;

        cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

        return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
        dev_info(&afu->dev, "Deactivating dedicated process mode\n");

        afu->current_mode = 0;
        afu->num_procs = 0;

        cxl_chardev_afu_remove(afu);

        return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
        if (mode == CXL_MODE_DIRECTED)
                return deactivate_afu_directed(afu);
        if (mode == CXL_MODE_DEDICATED)
                return deactivate_dedicated_process(afu);
        return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
        if (!mode)
                return 0;
        if (!(mode & afu->modes_supported))
                return -EINVAL;

        if (!cxl_ops->link_ok(afu->adapter, afu)) {
                WARN(1, "Device link is down, refusing to activate!\n");
                return -EIO;
        }

        if (mode == CXL_MODE_DIRECTED)
                return activate_afu_directed(afu);
        if (mode == CXL_MODE_DEDICATED)
                return activate_dedicated_process(afu);

        return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
                                 u64 wed, u64 amr)
{
        if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
                WARN(1, "Device link is down, refusing to attach process!\n");
                return -EIO;
        }

        ctx->kernel = kernel;
        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
                return attach_afu_directed(ctx, wed, amr);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
                return attach_dedicated(ctx, wed, amr);

        return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
        cxl_ops->afu_reset(ctx->afu);
        cxl_afu_disable(ctx->afu);
        cxl_psl_purge(ctx->afu);
        return 0;
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
        if (!ctx->pe_inserted)
                return 0;
        if (terminate_process_element(ctx))
                return -1;
        if (remove_process_element(ctx))
                return -1;

        return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
        trace_cxl_detach(ctx);

        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
                return detach_process_native_dedicated(ctx);

        return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
        u64 pidtid;

        /* If the adapter has gone away, we can't get any meaningful
         * information.
         */
        if (!cxl_ops->link_ok(afu->adapter, afu))
                return -EIO;

        info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
        info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
        pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
        info->pid = pidtid >> 32;
        info->tid = pidtid & 0xffffffff;
        info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
        info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        info->proc_handle = 0;

        return 0;
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
                                                 u64 dsisr, u64 errstat)
{
        u64 fir1, fir2, fir_slice, serr, afu_debug;

        fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
        fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
        serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
        afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
        dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
        dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
        dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
        dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
        dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

        dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
        cxl_stop_trace(ctx->afu->adapter);

        return cxl_ops->ack_irq(ctx, 0, errstat);
}

static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
        if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
        else
                cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

        return IRQ_HANDLED;
}
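
/*
 * The multiplexed PSL interrupt: read the PE handle from PSL_PEHandle_An,
 * look the context up in the AFU's IDR and hand the fault to cxl_irq(); if
 * no context matches, acknowledge the fault so the PSL can make progress.
 */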
static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
        struct cxl_afu *afu = data;
        struct cxl_context *ctx;
        struct cxl_irq_info irq_info;
        int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
        int ret;

        if ((ret = native_get_irq_info(afu, &irq_info))) {
                WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
                return fail_psl_irq(afu, &irq_info);
        }

        rcu_read_lock();
        ctx = idr_find(&afu->contexts_idr, ph);
        if (ctx) {
                ret = cxl_irq(irq, ctx, &irq_info);
                rcu_read_unlock();
                return ret;
        }
        rcu_read_unlock();

        WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
                " %016llx\n(Possible AFU HW issue - was a term/remove acked"
                " with outstanding transactions?)\n", ph, irq_info.dsisr,
                irq_info.dar);

        return fail_psl_irq(afu, &irq_info);
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
        struct cxl_afu *afu = data;
        u64 fir_slice, errstat, serr, afu_debug;

        WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
        errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
        afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
        dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
        dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
        dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
        dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);

        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return IRQ_HANDLED;
}

static irqreturn_t native_irq_err(int irq, void *data)
{
        struct cxl *adapter = data;
        u64 fir1, fir2, err_ivte;

        WARN(1, "CXL ERROR interrupt %i\n", irq);

        err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
        dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

        dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
        cxl_stop_trace(adapter);

        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);

        dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);

        return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
        int rc;

        adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&adapter->dev));
        if (!adapter->irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
                                       &adapter->native->err_hwirq,
                                       &adapter->native->err_virq,
                                       adapter->irq_name))) {
                kfree(adapter->irq_name);
                adapter->irq_name = NULL;
                return rc;
        }

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

        return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
        if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;

        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
        u64 serr;
        int rc;

        afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
                                      dev_name(&afu->dev));
        if (!afu->err_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
                                       &afu->serr_hwirq,
                                       &afu->serr_virq, afu->err_irq_name))) {
                kfree(afu->err_irq_name);
                afu->err_irq_name = NULL;
                return rc;
        }

        serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
        serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
        cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

        return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
        if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;

        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
        int rc;

        afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
                                      dev_name(&afu->dev));
        if (!afu->psl_irq_name)
                return -ENOMEM;

        if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
                                       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
                                       afu->psl_irq_name))) {
                kfree(afu->psl_irq_name);
                afu->psl_irq_name = NULL;
        }
        return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
        if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;

        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
        u64 dsisr;

        pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

        /* Clear PSL_DSISR[PE] */
        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
        cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

        /* Write 1s to clear error status bits */
        cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
        trace_cxl_psl_irq_ack(ctx, tfc);
        if (tfc)
                cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
        if (psl_reset_mask)
                recover_psl_err(ctx->afu, psl_reset_mask);

        return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
        return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
                                      enum cxl_attrs type)
{
        return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
                       (cr * afu->crs_len) + off);
        return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        *out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                       (cr * afu->crs_len) + off);
        return 0;
}
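
/*
 * The 8 and 16 bit configuration record accessors below are built on the
 * 32-bit ones: align the offset down to a word, read (or read-modify-write)
 * the containing word, and shift/mask the requested bytes into place.
 */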
static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xffff;
        return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
        u64 aligned_off = off & ~0x3L;
        u32 val;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
        if (!rc)
                *out = (val >> ((off & 0x3) * 8)) & 0xff;
        return rc;
}

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
        if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
                return -EIO;
        if (unlikely(off >= afu->crs_len))
                return -ERANGE;
        out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
                 (cr * afu->crs_len) + off, in);
        return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        WARN_ON(shift == 24);
        mask = 0xffff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
        u64 aligned_off = off & ~0x3L;
        u32 val32, mask, shift;
        int rc;

        rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
        if (rc)
                return rc;
        shift = (off & 0x3) * 8;
        mask = 0xff << shift;
        val32 = (val32 & ~mask) | (in << shift);

        rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
        return rc;
}
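
/*
 * Backend operations for the native (non-virtualised) case, wiring the
 * generic cxl core to the PCI and PSL helpers above.
 */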
const struct cxl_backend_ops cxl_native_ops = {
        .module = THIS_MODULE,
        .adapter_reset = cxl_pci_reset,
        .alloc_one_irq = cxl_pci_alloc_one_irq,
        .release_one_irq = cxl_pci_release_one_irq,
        .alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
        .release_irq_ranges = cxl_pci_release_irq_ranges,
        .setup_irq = cxl_pci_setup_irq,
        .handle_psl_slice_error = native_handle_psl_slice_error,
        .psl_interrupt = NULL,
        .ack_irq = native_ack_irq,
        .attach_process = native_attach_process,
        .detach_process = native_detach_process,
        .support_attributes = native_support_attributes,
        .link_ok = cxl_adapter_link_ok,
        .release_afu = cxl_pci_release_afu,
        .afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
        .afu_check_and_enable = native_afu_check_and_enable,
        .afu_activate_mode = native_afu_activate_mode,
        .afu_deactivate_mode = native_afu_deactivate_mode,
        .afu_reset = native_afu_reset,
        .afu_cr_read8 = native_afu_cr_read8,
        .afu_cr_read16 = native_afu_cr_read16,
        .afu_cr_read32 = native_afu_cr_read32,
        .afu_cr_read64 = native_afu_cr_read64,
        .afu_cr_write8 = native_afu_cr_write8,
        .afu_cr_write16 = native_afu_cr_write16,
        .afu_cr_write32 = native_afu_cr_write32,
        .read_adapter_vpd = cxl_pci_read_adapter_vpd,
};