vfio_ccw_cp.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * channel program interfaces
  4. *
  5. * Copyright IBM Corp. 2017
  6. *
  7. * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
  8. * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
  9. */
  10. #include <linux/mm.h>
  11. #include <linux/slab.h>
  12. #include <linux/iommu.h>
  13. #include <linux/vfio.h>
  14. #include <asm/idals.h>
  15. #include "vfio_ccw_cp.h"
  16. /*
  17. * Max length for ccw chain.
  18. * XXX: Limit to 256, need to check more?
  19. */
  20. #define CCWCHAIN_LEN_MAX 256
/*
 * Bookkeeping for a single pinned guest I/O range.
 * Filled in by pfn_array_alloc_pin(); released by pfn_array_unpin_free().
 */
struct pfn_array {
	/* Starting guest physical I/O address. */
	unsigned long pa_iova;
	/* Array that stores PFNs of the pages need to pin. */
	unsigned long *pa_iova_pfn;
	/* Array that receives PFNs of the pages pinned. */
	unsigned long *pa_pfn;
	/* Number of pages pinned from @pa_iova. */
	int pa_nr;
};

/* A table of pfn_array elements, one per idaw of a translated ccw. */
struct pfn_array_table {
	/* Array of pat_nr pfn_array elements. */
	struct pfn_array *pat_pa;
	/* Number of elements in @pat_pa; 0 when not initialized. */
	int pat_nr;
};

/* One host-side copy of a guest ccw chain, linked on a channel_program. */
struct ccwchain {
	struct list_head next;
	/* Host copy of the guest ccws (array of ch_len entries). */
	struct ccw1 *ch_ccw;
	/* Guest physical address of the current chain. */
	u64 ch_iova;
	/* Count of the valid ccws in chain. */
	int ch_len;
	/* Pinned PAGEs for the original data. */
	struct pfn_array_table *ch_pat;
};
  45. /*
  46. * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
  47. * @pa: pfn_array on which to perform the operation
  48. * @mdev: the mediated device to perform pin/unpin operations
  49. * @iova: target guest physical address
  50. * @len: number of bytes that should be pinned from @iova
  51. *
  52. * Attempt to allocate memory for PFNs, and pin user pages in memory.
  53. *
  54. * Usage of pfn_array:
  55. * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
  56. * this structure will be filled in by this function.
  57. *
  58. * Returns:
  59. * Number of pages pinned on success.
  60. * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
  61. * returns -EINVAL.
  62. * If no pages were pinned, returns -errno.
  63. */
  64. static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
  65. u64 iova, unsigned int len)
  66. {
  67. int i, ret = 0;
  68. if (!len)
  69. return 0;
  70. if (pa->pa_nr || pa->pa_iova_pfn)
  71. return -EINVAL;
  72. pa->pa_iova = iova;
  73. pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
  74. if (!pa->pa_nr)
  75. return -EINVAL;
  76. pa->pa_iova_pfn = kcalloc(pa->pa_nr,
  77. sizeof(*pa->pa_iova_pfn) +
  78. sizeof(*pa->pa_pfn),
  79. GFP_KERNEL);
  80. if (unlikely(!pa->pa_iova_pfn))
  81. return -ENOMEM;
  82. pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
  83. pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
  84. for (i = 1; i < pa->pa_nr; i++)
  85. pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
  86. ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
  87. IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
  88. if (ret < 0) {
  89. goto err_out;
  90. } else if (ret > 0 && ret != pa->pa_nr) {
  91. vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
  92. ret = -EINVAL;
  93. goto err_out;
  94. }
  95. return ret;
  96. err_out:
  97. pa->pa_nr = 0;
  98. kfree(pa->pa_iova_pfn);
  99. pa->pa_iova_pfn = NULL;
  100. return ret;
  101. }
  102. /* Unpin the pages before releasing the memory. */
  103. static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
  104. {
  105. vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
  106. pa->pa_nr = 0;
  107. kfree(pa->pa_iova_pfn);
  108. }
  109. static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
  110. {
  111. pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
  112. if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
  113. pat->pat_nr = 0;
  114. return -ENOMEM;
  115. }
  116. pat->pat_nr = nr;
  117. return 0;
  118. }
  119. static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
  120. struct device *mdev)
  121. {
  122. int i;
  123. for (i = 0; i < pat->pat_nr; i++)
  124. pfn_array_unpin_free(pat->pat_pa + i, mdev);
  125. if (pat->pat_nr) {
  126. kfree(pat->pat_pa);
  127. pat->pat_pa = NULL;
  128. pat->pat_nr = 0;
  129. }
  130. }
  131. static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
  132. unsigned long iova)
  133. {
  134. struct pfn_array *pa = pat->pat_pa;
  135. unsigned long iova_pfn = iova >> PAGE_SHIFT;
  136. int i, j;
  137. for (i = 0; i < pat->pat_nr; i++, pa++)
  138. for (j = 0; j < pa->pa_nr; j++)
  139. if (pa->pa_iova_pfn[j] == iova_pfn)
  140. return true;
  141. return false;
  142. }
  143. /* Create the list idal words for a pfn_array_table. */
  144. static inline void pfn_array_table_idal_create_words(
  145. struct pfn_array_table *pat,
  146. unsigned long *idaws)
  147. {
  148. struct pfn_array *pa;
  149. int i, j, k;
  150. /*
  151. * Idal words (execept the first one) rely on the memory being 4k
  152. * aligned. If a user virtual address is 4K aligned, then it's
  153. * corresponding kernel physical address will also be 4K aligned. Thus
  154. * there will be no problem here to simply use the phys to create an
  155. * idaw.
  156. */
  157. k = 0;
  158. for (i = 0; i < pat->pat_nr; i++) {
  159. pa = pat->pat_pa + i;
  160. for (j = 0; j < pa->pa_nr; j++) {
  161. idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
  162. if (k == 0)
  163. idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
  164. k++;
  165. }
  166. }
  167. }
/*
 * Within the domain (@mdev), copy @n bytes from a guest physical
 * address (@iova) to a host physical address (@to).
 *
 * Returns 0 when all @n bytes were copied, a positive count of bytes
 * left uncopied if the pinned range was exhausted early, or a negative
 * errno if pinning failed.  Callers treat any non-zero value as an
 * error.
 */
static long copy_from_iova(struct device *mdev,
			   void *to, u64 iova,
			   unsigned long n)
{
	struct pfn_array pa = {0};
	u64 from;
	int i, ret;
	unsigned long l, m;

	/* Pin the guest pages backing [iova, iova + n). */
	ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
	if (ret <= 0)
		return ret;

	l = n;	/* bytes still to copy */
	for (i = 0; i < pa.pa_nr; i++) {
		from = pa.pa_pfn[i] << PAGE_SHIFT;
		m = PAGE_SIZE;
		if (i == 0) {
			/* First page: honor the sub-page offset of @iova. */
			from += iova & (PAGE_SIZE - 1);
			m -= iova & (PAGE_SIZE - 1);
		}

		m = min(l, m);
		/*
		 * NOTE(review): the physical address is used directly as a
		 * pointer — presumably relies on the kernel's 1:1 mapping
		 * of physical memory on this platform; confirm.
		 */
		memcpy(to + (n - l), (void *)from, m);

		l -= m;
		if (l == 0)
			break;
	}

	pfn_array_unpin_free(&pa, mdev);

	return l;
}
/*
 * Copy @len ccws from guest address @iova into @to.  If the orb marks
 * the program as format-0 (orb.cmd.fmt == 0), each 8-byte entry is
 * reinterpreted as a ccw0 and rewritten in place into ccw1 layout.
 * Returns 0 on success, non-zero on copy failure (see copy_from_iova()).
 */
static long copy_ccw_from_iova(struct channel_program *cp,
			       struct ccw1 *to, u64 iova,
			       unsigned long len)
{
	struct ccw0 ccw0;
	struct ccw1 *pccw1;
	int ret;
	int i;

	ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
	if (ret)
		return ret;

	/* Convert each ccw from format 0 to format 1. */
	if (!cp->orb.cmd.fmt) {
		pccw1 = to;
		for (i = 0; i < len; i++) {
			/* Snapshot the format-0 view before overwriting. */
			ccw0 = *(struct ccw0 *)pccw1;
			if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
				/*
				 * Normalize the TIC opcode and clear the
				 * fields a TIC does not carry.
				 */
				pccw1->cmd_code = CCW_CMD_TIC;
				pccw1->flags = 0;
				pccw1->count = 0;
			} else {
				pccw1->cmd_code = ccw0.cmd_code;
				pccw1->flags = ccw0.flags;
				pccw1->count = ccw0.count;
			}
			pccw1->cda = ccw0.cda;
			pccw1++;
		}
	}

	return ret;
}
/*
 * Helpers to operate ccwchain.
 */
/* All four low opcode bits clear — treated as a "test" ccw. */
#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)

#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)

#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)

/* The ccw's cda points at an idal rather than at the data itself. */
#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)

/* The ccw chains to a successor via command or data chaining. */
#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
/*
 * Allocate a ccwchain for @len ccws and link it onto @cp's chain list.
 *
 * A single allocation is carved into three regions:
 *   [struct ccwchain][ch_ccw: len * ccw1][ch_pat: len * pfn_array_table]
 * Returns the new chain, or NULL on allocation failure.
 */
static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
	struct ccwchain *chain;
	void *data;
	size_t size;

	/* Make ccw address aligned to 8. */
	size = ((sizeof(*chain) + 7L) & -8L) +
		sizeof(*chain->ch_ccw) * len +
		sizeof(*chain->ch_pat) * len;
	/*
	 * GFP_DMA: keeps the ccw array at a low physical address —
	 * presumably required because ccw1.cda is a 32-bit field (see the
	 * "2G limitation" note at ccwchain_fetch_one()); confirm.
	 */
	chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
	if (!chain)
		return NULL;

	/* ch_ccw starts at the first 8-byte boundary after the header. */
	data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
	chain->ch_ccw = (struct ccw1 *)data;

	data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
	chain->ch_pat = (struct pfn_array_table *)data;

	chain->ch_len = len;

	list_add_tail(&chain->next, &cp->ccwchain_list);

	return chain;
}
  258. static void ccwchain_free(struct ccwchain *chain)
  259. {
  260. list_del(&chain->next);
  261. kfree(chain);
  262. }
  263. /* Free resource for a ccw that allocated memory for its cda. */
  264. static void ccwchain_cda_free(struct ccwchain *chain, int idx)
  265. {
  266. struct ccw1 *ccw = chain->ch_ccw + idx;
  267. if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw))
  268. return;
  269. if (!ccw->count)
  270. return;
  271. kfree((void *)(u64)ccw->cda);
  272. }
  273. /* Unpin the pages then free the memory resources. */
  274. static void cp_unpin_free(struct channel_program *cp)
  275. {
  276. struct ccwchain *chain, *temp;
  277. int i;
  278. list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
  279. for (i = 0; i < chain->ch_len; i++) {
  280. pfn_array_table_unpin_free(chain->ch_pat + i,
  281. cp->mdev);
  282. ccwchain_cda_free(chain, i);
  283. }
  284. ccwchain_free(chain);
  285. }
  286. }
  287. /**
  288. * ccwchain_calc_length - calculate the length of the ccw chain.
  289. * @iova: guest physical address of the target ccw chain
  290. * @cp: channel_program on which to perform the operation
  291. *
  292. * This is the chain length not considering any TICs.
  293. * You need to do a new round for each TIC target.
  294. *
  295. * The program is also validated for absence of not yet supported
  296. * indirect data addressing scenarios.
  297. *
  298. * Returns: the length of the ccw chain or -errno.
  299. */
  300. static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
  301. {
  302. struct ccw1 *ccw, *p;
  303. int cnt;
  304. /*
  305. * Copy current chain from guest to host kernel.
  306. * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
  307. * So copying 2K is enough (safe).
  308. */
  309. p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
  310. if (!ccw)
  311. return -ENOMEM;
  312. cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
  313. if (cnt) {
  314. kfree(ccw);
  315. return cnt;
  316. }
  317. cnt = 0;
  318. do {
  319. cnt++;
  320. /*
  321. * As we don't want to fail direct addressing even if the
  322. * orb specified one of the unsupported formats, we defer
  323. * checking for IDAWs in unsupported formats to here.
  324. */
  325. if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
  326. kfree(p);
  327. return -EOPNOTSUPP;
  328. }
  329. if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
  330. break;
  331. ccw++;
  332. } while (cnt < CCWCHAIN_LEN_MAX + 1);
  333. if (cnt == CCWCHAIN_LEN_MAX + 1)
  334. cnt = -EINVAL;
  335. kfree(p);
  336. return cnt;
  337. }
  338. static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
  339. {
  340. struct ccwchain *chain;
  341. u32 ccw_head, ccw_tail;
  342. list_for_each_entry(chain, &cp->ccwchain_list, next) {
  343. ccw_head = chain->ch_iova;
  344. ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1);
  345. if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail))
  346. return 1;
  347. }
  348. return 0;
  349. }
/* Forward declaration: ccwchain_handle_tic and ccwchain_loop_tic recurse. */
static int ccwchain_loop_tic(struct ccwchain *chain,
			     struct channel_program *cp);

/*
 * Resolve one TIC ccw: if its target chain was not copied yet, copy it
 * from the guest and recursively resolve the TICs of that new chain.
 * Returns 0 on success or a negative errno.
 */
static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
{
	struct ccwchain *chain;
	int len, ret;

	/* May transfer to an existing chain. */
	if (tic_target_chain_exists(tic, cp))
		return 0;

	/* Get chain length. */
	len = ccwchain_calc_length(tic->cda, cp);
	if (len < 0)
		return len;

	/* Need alloc a new chain for this one. */
	chain = ccwchain_alloc(cp, len);
	if (!chain)
		return -ENOMEM;
	chain->ch_iova = tic->cda;

	/* Copy the new chain from user. */
	ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
	if (ret) {
		ccwchain_free(chain);
		return ret;
	}

	/* Loop for tics on this new chain. */
	return ccwchain_loop_tic(chain, cp);
}
  377. /* Loop for TICs. */
  378. static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
  379. {
  380. struct ccw1 *tic;
  381. int i, ret;
  382. for (i = 0; i < chain->ch_len; i++) {
  383. tic = chain->ch_ccw + i;
  384. if (!ccw_is_tic(tic))
  385. continue;
  386. ret = ccwchain_handle_tic(tic, cp);
  387. if (ret)
  388. return ret;
  389. }
  390. return 0;
  391. }
/*
 * Translate a TIC ccw: rewrite its guest target address (cda) into the
 * host address of the matching ccw inside the already-copied chains.
 * Returns 0 on success, -EFAULT if the target is in no known chain.
 */
static int ccwchain_fetch_tic(struct ccwchain *chain,
			      int idx,
			      struct channel_program *cp)
{
	struct ccw1 *ccw = chain->ch_ccw + idx;
	struct ccwchain *iter;
	u32 ccw_head, ccw_tail;

	list_for_each_entry(iter, &cp->ccwchain_list, next) {
		ccw_head = iter->ch_iova;
		ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);

		if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
			/* Same offset into the host copy of the chain. */
			ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
						     (ccw->cda - ccw_head));
			return 0;
		}
	}

	return -EFAULT;
}
/*
 * Translate a direct-data ccw: pin its guest data pages and replace the
 * cda with a newly built idaw list, turning it into an idal ccw.
 * Returns 0 on success or a negative errno; on failure the cda is
 * zeroed so cleanup won't free a bogus pointer.
 */
static int ccwchain_fetch_direct(struct ccwchain *chain,
				 int idx,
				 struct channel_program *cp)
{
	struct ccw1 *ccw;
	struct pfn_array_table *pat;
	unsigned long *idaws;
	int ret;

	ccw = chain->ch_ccw + idx;

	if (!ccw->count) {
		/*
		 * We just want the translation result of any direct ccw
		 * to be an IDA ccw, so let's add the IDA flag for it.
		 * Although the flag will be ignored by firmware.
		 */
		ccw->flags |= CCW_FLAG_IDA;
		return 0;
	}

	/*
	 * Pin data page(s) in memory.
	 * The number of pages actually is the count of the idaws which will be
	 * needed when translating a direct ccw to a idal ccw.
	 */
	pat = chain->ch_pat + idx;
	ret = pfn_array_table_init(pat, 1);
	if (ret)
		goto out_init;

	/* On success, ret is the number of pages pinned (== idaw count). */
	ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
	if (ret < 0)
		goto out_unpin;

	/* Translate this direct ccw to a idal ccw. */
	idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
	if (!idaws) {
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* The idaw list is freed later by ccwchain_cda_free(). */
	ccw->cda = (__u32) virt_to_phys(idaws);
	ccw->flags |= CCW_FLAG_IDA;

	pfn_array_table_idal_create_words(pat, idaws);

	return 0;

out_unpin:
	pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
	ccw->cda = 0;
	return ret;
}
/*
 * Translate an idal ccw: copy the guest's idaw list, pin the guest page
 * behind each idaw, and overwrite the copied idaws with host physical
 * addresses.  The ccw's cda is retargeted at the host copy.
 * Returns 0 on success or a negative errno; on failure the cda is
 * zeroed so cleanup won't free a bogus pointer.
 */
static int ccwchain_fetch_idal(struct ccwchain *chain,
			       int idx,
			       struct channel_program *cp)
{
	struct ccw1 *ccw;
	struct pfn_array_table *pat;
	unsigned long *idaws;
	u64 idaw_iova;
	unsigned int idaw_nr, idaw_len;
	int i, ret;

	ccw = chain->ch_ccw + idx;

	if (!ccw->count)
		return 0;

	/* Calculate size of idaws. */
	/* Read the first guest idaw to learn the starting data address. */
	ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
	if (ret)
		return ret;
	/* The guest address is only used here for offset/length math. */
	idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
	idaw_len = idaw_nr * sizeof(*idaws);

	/* Pin data page(s) in memory. */
	pat = chain->ch_pat + idx;
	ret = pfn_array_table_init(pat, idaw_nr);
	if (ret)
		goto out_init;

	/* Translate idal ccw to use new allocated idaws. */
	idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
	if (!idaws) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
	if (ret)
		goto out_free_idaws;

	/* From here the ccw owns the host idaw list (see cp_unpin_free). */
	ccw->cda = virt_to_phys(idaws);

	/* One page per guest idaw — assumes 4k idaws, see cp_init() docs. */
	for (i = 0; i < idaw_nr; i++) {
		idaw_iova = *(idaws + i);

		ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
					  idaw_iova, 1);
		if (ret < 0)
			goto out_free_idaws;
	}

	/* Replace the copied guest idaws with host physical addresses. */
	pfn_array_table_idal_create_words(pat, idaws);

	return 0;

out_free_idaws:
	kfree(idaws);
out_unpin:
	pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
	ccw->cda = 0;
	return ret;
}
  507. /*
  508. * Fetch one ccw.
  509. * To reduce memory copy, we'll pin the cda page in memory,
  510. * and to get rid of the cda 2G limitiaion of ccw1, we'll translate
  511. * direct ccws to idal ccws.
  512. */
  513. static int ccwchain_fetch_one(struct ccwchain *chain,
  514. int idx,
  515. struct channel_program *cp)
  516. {
  517. struct ccw1 *ccw = chain->ch_ccw + idx;
  518. if (ccw_is_test(ccw) || ccw_is_noop(ccw))
  519. return 0;
  520. if (ccw_is_tic(ccw))
  521. return ccwchain_fetch_tic(chain, idx, cp);
  522. if (ccw_is_idal(ccw))
  523. return ccwchain_fetch_idal(chain, idx, cp);
  524. return ccwchain_fetch_direct(chain, idx, cp);
  525. }
  526. /**
  527. * cp_init() - allocate ccwchains for a channel program.
  528. * @cp: channel_program on which to perform the operation
  529. * @mdev: the mediated device to perform pin/unpin operations
  530. * @orb: control block for the channel program from the guest
  531. *
  532. * This creates one or more ccwchain(s), and copies the raw data of
  533. * the target channel program from @orb->cmd.iova to the new ccwchain(s).
  534. *
  535. * Limitations:
  536. * 1. Supports only prefetch enabled mode.
  537. * 2. Supports idal(c64) ccw chaining.
  538. * 3. Supports 4k idaw.
  539. *
  540. * Returns:
  541. * %0 on success and a negative error value on failure.
  542. */
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
	u64 iova = orb->cmd.cpa;
	struct ccwchain *chain;
	int len, ret;

	/*
	 * XXX:
	 * Only support prefetch enable mode now.
	 */
	if (!orb->cmd.pfch)
		return -EOPNOTSUPP;

	INIT_LIST_HEAD(&cp->ccwchain_list);
	/* Keep a private copy of the orb; it is patched in cp_get_orb(). */
	memcpy(&cp->orb, orb, sizeof(*orb));
	cp->mdev = mdev;

	/* Get chain length. */
	len = ccwchain_calc_length(iova, cp);
	if (len < 0)
		return len;

	/* Alloc mem for the head chain. */
	chain = ccwchain_alloc(cp, len);
	if (!chain)
		return -ENOMEM;
	chain->ch_iova = iova;

	/* Copy the head chain from guest. */
	ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
	if (ret) {
		ccwchain_free(chain);
		return ret;
	}

	/* Now loop for its TICs. */
	ret = ccwchain_loop_tic(chain, cp);
	if (ret)
		cp_unpin_free(cp);
	/* It is safe to force: if not set but idals used
	 * ccwchain_calc_length returns an error.
	 */
	cp->orb.cmd.c64 = 1;

	return ret;
}
/**
 * cp_free() - free resources for channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This unpins the memory pages and frees the memory space occupied by
 * @cp, which must have been returned by a previous call to cp_init().
 * Otherwise, undefined behavior occurs.
 */
void cp_free(struct channel_program *cp)
{
	/* All per-chain resources hang off the ccwchain list. */
	cp_unpin_free(cp);
}
  594. /**
  595. * cp_prefetch() - translate a guest physical address channel program to
  596. * a real-device runnable channel program.
  597. * @cp: channel_program on which to perform the operation
  598. *
  599. * This function translates the guest-physical-address channel program
  600. * and stores the result to ccwchain list. @cp must have been
  601. * initialized by a previous call with cp_init(). Otherwise, undefined
  602. * behavior occurs.
  603. * For each chain composing the channel program:
  604. * - On entry ch_len holds the count of CCWs to be translated.
  605. * - On exit ch_len is adjusted to the count of successfully translated CCWs.
  606. * This allows cp_free to find in ch_len the count of CCWs to free in a chain.
  607. *
  608. * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
  609. * as helpers to do ccw chain translation inside the kernel. Basically
  610. * they accept a channel program issued by a virtual machine, and
  611. * translate the channel program to a real-device runnable channel
  612. * program.
  613. *
  614. * These APIs will copy the ccws into kernel-space buffers, and update
* the guest physical addresses with their corresponding host physical
  616. * addresses. Then channel I/O device drivers could issue the
  617. * translated channel program to real devices to perform an I/O
  618. * operation.
  619. *
  620. * These interfaces are designed to support translation only for
  621. * channel programs, which are generated and formatted by a
  622. * guest. Thus this will make it possible for things like VFIO to
  623. * leverage the interfaces to passthrough a channel I/O mediated
  624. * device in QEMU.
  625. *
  626. * We support direct ccw chaining by translating them to idal ccws.
  627. *
  628. * Returns:
  629. * %0 on success and a negative error value on failure.
  630. */
int cp_prefetch(struct channel_program *cp)
{
	struct ccwchain *chain;
	int len, idx, ret;

	/* Translate every ccw of every chain of the program. */
	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		len = chain->ch_len;
		for (idx = 0; idx < len; idx++) {
			ret = ccwchain_fetch_one(chain, idx, cp);
			if (ret)
				goto out_err;
		}
	}

	return 0;
out_err:
	/* Only cleanup the chain elements that were actually translated. */
	chain->ch_len = idx;
	/* Chains after the failing one were not translated at all. */
	list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
		chain->ch_len = 0;
	}
	return ret;
}
  652. /**
  653. * cp_get_orb() - get the orb of the channel program
  654. * @cp: channel_program on which to perform the operation
  655. * @intparm: new intparm for the returned orb
  656. * @lpm: candidate value of the logical-path mask for the returned orb
  657. *
  658. * This function returns the address of the updated orb of the channel
  659. * program. Channel I/O device drivers could use this orb to issue a
  660. * ssch.
  661. */
  662. union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
  663. {
  664. union orb *orb;
  665. struct ccwchain *chain;
  666. struct ccw1 *cpa;
  667. orb = &cp->orb;
  668. orb->cmd.intparm = intparm;
  669. orb->cmd.fmt = 1;
  670. orb->cmd.key = PAGE_DEFAULT_KEY >> 4;
  671. if (orb->cmd.lpm == 0)
  672. orb->cmd.lpm = lpm;
  673. chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
  674. cpa = chain->ch_ccw;
  675. orb->cmd.cpa = (__u32) __pa(cpa);
  676. return orb;
  677. }
  678. /**
  679. * cp_update_scsw() - update scsw for a channel program.
  680. * @cp: channel_program on which to perform the operation
  681. * @scsw: I/O results of the channel program and also the target to be
  682. * updated
  683. *
  684. * @scsw contains the I/O results of the channel program that pointed
  685. * to by @cp. However what @scsw->cpa stores is a host physical
  686. * address, which is meaningless for the guest, which is waiting for
  687. * the I/O results.
  688. *
* This function updates @scsw->cpa to its corresponding guest physical
  690. * address.
  691. */
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
	struct ccwchain *chain;
	u32 cpa = scsw->cmd.cpa;
	u32 ccw_head, ccw_tail;

	/*
	 * LATER:
	 * For now, only update the cmd.cpa part. We may need to deal with
	 * other portions of the schib as well, even if we don't return them
	 * in the ioctl directly. Path status changes etc.
	 */
	list_for_each_entry(chain, &cp->ccwchain_list, next) {
		/*
		 * NOTE(review): host addresses are truncated to 32 bits for
		 * the comparison — presumably safe because the chains come
		 * from a GFP_DMA allocation (see ccwchain_alloc()); confirm.
		 */
		ccw_head = (u32)(u64)chain->ch_ccw;
		ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1);

		if ((ccw_head <= cpa) && (cpa <= ccw_tail)) {
			/*
			 * (cpa - ccw_head) is the offset value of the host
			 * physical ccw to its chain head.
			 * Adding this value to the guest physical ccw chain
			 * head gets us the guest cpa.
			 */
			cpa = chain->ch_iova + (cpa - ccw_head);
			break;
		}
	}

	scsw->cmd.cpa = cpa;
}
  719. /**
  720. * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
  721. * @cp: channel_program on which to perform the operation
  722. * @iova: the iova to check
  723. *
  724. * If the @iova is currently pinned for the ccw chain, return true;
  725. * else return false.
  726. */
  727. bool cp_iova_pinned(struct channel_program *cp, u64 iova)
  728. {
  729. struct ccwchain *chain;
  730. int i;
  731. list_for_each_entry(chain, &cp->ccwchain_list, next) {
  732. for (i = 0; i < chain->ch_len; i++)
  733. if (pfn_array_table_iova_pinned(chain->ch_pat + i,
  734. iova))
  735. return true;
  736. }
  737. return false;
  738. }