native.c

/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <asm/synch.h>
#include <misc/cxl.h>

#include "cxl.h"
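
/*
 * Write an AFU control command and spin until the masked status in
 * CXL_AFU_Cntl_An reads back the expected result, or CXL_TIMEOUT seconds
 * elapse (-EBUSY). Serialised by afu_cntl_lock; on success the cached
 * afu->enabled state is updated to match.
 */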
static int afu_control(struct cxl_afu *afu, u64 command,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			spin_unlock(&afu->afu_cntl_lock);
			return -EBUSY;
		}
		pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}
	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
	spin_unlock(&afu->afu_cntl_lock);

	return 0;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
int cxl_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU reset request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_RA,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);
}

static int afu_check_and_enable(struct cxl_afu *afu)
{
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}
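
/*
 * Purge outstanding transactions in the PSL: set the purge control bit,
 * then poll until the purge status leaves the pending state, terminating
 * or acknowledging any translation faults reported in PSL_DSISR along the
 * way. The AFU must already be disabled; if it is not, warn and disable it.
 */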
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("PSL purge request\n");

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			return -EBUSY;
		}
		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
		if (dsisr & CXL_PSL_DSISR_TRANS) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
	return 0;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
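	/*
	 * Spelling that out with the alignment dropped:
	 *    spa_size = ((n + 4) * 128) + (n * 8) + 256
	 *             = 136*n + 768
	 * so, dividing through by 8:
	 *    n = (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17
	 * which is the expression below.
	 */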
	return ((spa_size / 8) - 96) / 17;
}

static int alloc_spa(struct cxl_afu *afu)
{
	u64 spap;

	/* Work out how many pages to allocate */
	afu->spa_order = 0;
	do {
		afu->spa_order++;
		afu->spa_size = (1 << afu->spa_order) * PAGE_SIZE;
		afu->spa_max_procs = spa_max_procs(afu->spa_size);
	} while (afu->spa_max_procs < afu->num_procs);

	WARN_ON(afu->spa_size > 0x100000); /* Max size supported by the hardware */

	if (!(afu->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);

	afu->sw_command_status = (__be64 *)((char *)afu->spa +
					    ((afu->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);

	return 0;
}

static void release_spa(struct cxl_afu *afu)
{
	free_pages((unsigned long) afu->spa, afu->spa_order);
}
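
/*
 * Adapter-wide invalidation of all TLB and SLB entries: select all AFUs,
 * issue TLBIA then SLBIA, and poll the pending bit of each register until
 * the hardware completes, bounded by CXL_TIMEOUT.
 */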
int cxl_tlb_slb_invalidate(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}
	return 0;
}

int cxl_afu_slbia(struct cxl_afu *afu)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("cxl_afu_slbia issuing SLBIA command\n");
	cxl_p2n_write(afu, CXL_SLBIA_An, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p2n_read(afu, CXL_SLBIA_An) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
			return -EBUSY;
		}
		cpu_relax();
	}
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using the per-slice version (i.e. SLBIA_An) may improve performance here */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
		     be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}
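
/*
 * Issue a single scheduled process area command: publish the new software
 * state and command word in the SPA (the barriers order those stores ahead
 * of the MMIO), ring the PSL via LLCMD, then poll sw_command_status until
 * the PSL echoes back the command, the derived state (cmd >> 16) and the
 * PE link for this context. All-ones in sw_command_status means the PSL
 * reported an error.
 */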
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			return -EBUSY;
		}
		state = be64_to_cpup(ctx->afu->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			return -1;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
	return 0;
}

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
				    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0)))
		ctx->pe_inserted = false;
	slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->spa_mutex);
	return rc;
}
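
/*
 * Master contexts, and AFUs without a per-process problem state area, map
 * the AFU's entire problem state space; other contexts get their own
 * pp_size slice at pp_offset + pp_size * pe.
 */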
static void assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	if (alloc_spa(afu))
		return -ENOMEM;

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;
	afu->num_procs = afu->max_procs_virtualised;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}
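
/*
 * The LE bit in the software state register must match the endianness the
 * kernel was built for, so the attach paths below set or clear
 * CXL_PSL_SR_An_LE based on CONFIG_CPU_LITTLE_ENDIAN.
 */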
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u64 sr;
	int r, result;

	assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	sr = 0;
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	/*
	 * HV=0, PR=1, R=1 for userspace.
	 * For kernel contexts this would need to change.
	 */
	sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
	set_endian(sr);
	sr &= ~(CXL_PSL_SR_An_HV);
	if (!test_tsk_thread_flag(current, TIF_32BIT))
		sr |= CXL_PSL_SR_An_SF;
	ctx->elem->common.pid = cpu_to_be32(current->pid);
	ctx->elem->common.tid = 0;
	ctx->elem->sr = cpu_to_be64(sr);

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.aurp0 = 0; /* disable */
	ctx->elem->common.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	release_spa(afu);

	return 0;
}

static int activate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}
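
/*
 * Dedicated mode has no scheduled process area: the single context's PID,
 * state register, segment table pointers, IVTEs, AMR and WED are written
 * directly to the per-slice registers. Each IVTE register packs the four
 * interrupt offset/range values as 16-bit fields, range 0 in the most
 * significant halfword.
 */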
static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 sr;
	int rc;

	sr = 0;
	set_endian(sr);
	if (ctx->master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
	if (!test_tsk_thread_flag(current, TIF_32BIT))
		sr |= CXL_PSL_SR_An_SF;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, (u64)current->pid << 32);
	cxl_p1n_write(afu, CXL_PSL_SR_An, sr);

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		       ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		       ((u64)ctx->irqs.range[3] & 0xffff));

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	assign_psn_space(ctx);

	if ((rc = cxl_afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

int cxl_afu_deactivate_mode(struct cxl_afu *afu)
{
	return _cxl_afu_deactivate_mode(afu, afu->current_mode);
}

int cxl_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return activate_dedicated_process(afu);

	return -EINVAL;
}

int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
{
	ctx->kernel = kernel;
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return attach_afu_directed(ctx, wed, amr);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return attach_dedicated(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	cxl_afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

int cxl_detach_process(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	u64 pidtid;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
	info->pid = pidtid >> 32;
	info->tid = pidtid & 0xffffffff;
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);

	return 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}