/* vfio_ap_ops.c */
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Adjunct processor matrix VFIO device driver callbacks.
  4. *
  5. * Copyright IBM Corp. 2018
  6. *
  7. * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
  8. * Halil Pasic <pasic@linux.ibm.com>
  9. * Pierre Morel <pmorel@linux.ibm.com>
  10. */
  11. #include <linux/string.h>
  12. #include <linux/vfio.h>
  13. #include <linux/device.h>
  14. #include <linux/list.h>
  15. #include <linux/ctype.h>
  16. #include <linux/bitops.h>
  17. #include <linux/kvm_host.h>
  18. #include <linux/module.h>
  19. #include <asm/kvm.h>
  20. #include <asm/zcrypt.h>
  21. #include "vfio_ap_private.h"
  22. #define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
  23. #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
  24. static void vfio_ap_matrix_init(struct ap_config_info *info,
  25. struct ap_matrix *matrix)
  26. {
  27. matrix->apm_max = info->apxa ? info->Na : 63;
  28. matrix->aqm_max = info->apxa ? info->Nd : 15;
  29. matrix->adm_max = info->apxa ? info->Nd : 15;
  30. }
  31. static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
  32. {
  33. struct ap_matrix_mdev *matrix_mdev;
  34. if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
  35. return -EPERM;
  36. matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
  37. if (!matrix_mdev) {
  38. atomic_inc(&matrix_dev->available_instances);
  39. return -ENOMEM;
  40. }
  41. vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
  42. mdev_set_drvdata(mdev, matrix_mdev);
  43. mutex_lock(&matrix_dev->lock);
  44. list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
  45. mutex_unlock(&matrix_dev->lock);
  46. return 0;
  47. }
  48. static int vfio_ap_mdev_remove(struct mdev_device *mdev)
  49. {
  50. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  51. if (matrix_mdev->kvm)
  52. return -EBUSY;
  53. mutex_lock(&matrix_dev->lock);
  54. list_del(&matrix_mdev->node);
  55. mutex_unlock(&matrix_dev->lock);
  56. kfree(matrix_mdev);
  57. mdev_set_drvdata(mdev, NULL);
  58. atomic_inc(&matrix_dev->available_instances);
  59. return 0;
  60. }
/* sysfs 'name' attribute: the human-readable mdev type name. */
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
}

MDEV_TYPE_ATTR_RO(name);
/*
 * sysfs 'available_instances' attribute: how many more mediated matrix
 * devices can still be created.
 */
static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	return sprintf(buf, "%d\n",
		       atomic_read(&matrix_dev->available_instances));
}

MDEV_TYPE_ATTR_RO(available_instances);
/* sysfs 'device_api' attribute: the VFIO device API string for AP. */
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
}

MDEV_TYPE_ATTR_RO(device_api);
/* Attributes exposed for the mdev supported type. */
static struct attribute *vfio_ap_mdev_type_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};
/* The single supported mdev type: AP passthrough. */
static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
	.name = VFIO_AP_MDEV_TYPE_HWVIRT,
	.attrs = vfio_ap_mdev_type_attrs,
};
/* NULL-terminated list of supported type groups (just one). */
static struct attribute_group *vfio_ap_mdev_type_groups[] = {
	&vfio_ap_mdev_hwvirt_type_group,
	NULL,
};
/*
 * Query/result cookie passed to vfio_ap_has_queue() via
 * driver_for_each_device().
 */
struct vfio_ap_queue_reserved {
	unsigned long *apid;	/* adapter ID to match, or NULL */
	unsigned long *apqi;	/* queue index to match, or NULL */
	bool reserved;		/* set true when a matching queue is found */
};
  98. /**
  99. * vfio_ap_has_queue
  100. *
  101. * @dev: an AP queue device
  102. * @data: a struct vfio_ap_queue_reserved reference
  103. *
  104. * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
  105. * apid or apqi specified in @data:
  106. *
  107. * - If @data contains both an apid and apqi value, then @data will be flagged
  108. * as reserved if the APID and APQI fields for the AP queue device matches
  109. *
  110. * - If @data contains only an apid value, @data will be flagged as
  111. * reserved if the APID field in the AP queue device matches
  112. *
  113. * - If @data contains only an apqi value, @data will be flagged as
  114. * reserved if the APQI field in the AP queue device matches
  115. *
  116. * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if
  117. * @data does not contain either an apid or apqi.
  118. */
  119. static int vfio_ap_has_queue(struct device *dev, void *data)
  120. {
  121. struct vfio_ap_queue_reserved *qres = data;
  122. struct ap_queue *ap_queue = to_ap_queue(dev);
  123. ap_qid_t qid;
  124. unsigned long id;
  125. if (qres->apid && qres->apqi) {
  126. qid = AP_MKQID(*qres->apid, *qres->apqi);
  127. if (qid == ap_queue->qid)
  128. qres->reserved = true;
  129. } else if (qres->apid && !qres->apqi) {
  130. id = AP_QID_CARD(ap_queue->qid);
  131. if (id == *qres->apid)
  132. qres->reserved = true;
  133. } else if (!qres->apid && qres->apqi) {
  134. id = AP_QID_QUEUE(ap_queue->qid);
  135. if (id == *qres->apqi)
  136. qres->reserved = true;
  137. } else {
  138. return -EINVAL;
  139. }
  140. return 0;
  141. }
  142. /**
  143. * vfio_ap_verify_queue_reserved
  144. *
  145. * @matrix_dev: a mediated matrix device
  146. * @apid: an AP adapter ID
  147. * @apqi: an AP queue index
  148. *
  149. * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
  150. * driver according to the following rules:
  151. *
  152. * - If both @apid and @apqi are not NULL, then there must be an AP queue
  153. * device bound to the vfio_ap driver with the APQN identified by @apid and
  154. * @apqi
  155. *
  156. * - If only @apid is not NULL, then there must be an AP queue device bound
  157. * to the vfio_ap driver with an APQN containing @apid
  158. *
  159. * - If only @apqi is not NULL, then there must be an AP queue device bound
  160. * to the vfio_ap driver with an APQN containing @apqi
  161. *
  162. * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
  163. */
  164. static int vfio_ap_verify_queue_reserved(unsigned long *apid,
  165. unsigned long *apqi)
  166. {
  167. int ret;
  168. struct vfio_ap_queue_reserved qres;
  169. qres.apid = apid;
  170. qres.apqi = apqi;
  171. qres.reserved = false;
  172. ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
  173. vfio_ap_has_queue);
  174. if (ret)
  175. return ret;
  176. if (qres.reserved)
  177. return 0;
  178. return -EADDRNOTAVAIL;
  179. }
  180. static int
  181. vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
  182. unsigned long apid)
  183. {
  184. int ret;
  185. unsigned long apqi;
  186. unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
  187. if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
  188. return vfio_ap_verify_queue_reserved(&apid, NULL);
  189. for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
  190. ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
  191. if (ret)
  192. return ret;
  193. }
  194. return 0;
  195. }
/**
 * vfio_ap_mdev_verify_no_sharing
 *
 * Verifies that the APQNs derived from the cross product of the AP adapter IDs
 * and AP queue indexes comprising the AP matrix are not configured for another
 * mediated device. AP queue sharing is not allowed.
 *
 * @matrix_mdev: the mediated matrix device
 *
 * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE.
 */
static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
{
	struct ap_matrix_mdev *lstdev;
	DECLARE_BITMAP(apm, AP_DEVICES);
	DECLARE_BITMAP(aqm, AP_DOMAINS);

	list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
		/* A device never conflicts with itself. */
		if (matrix_mdev == lstdev)
			continue;

		memset(apm, 0, sizeof(apm));
		memset(aqm, 0, sizeof(aqm));

		/*
		 * We work on full longs, as we can only exclude the leftover
		 * bits in non-inverse order. The leftover is all zeros.
		 */
		/* No adapter overlap: no APQN can be shared with lstdev. */
		if (!bitmap_and(apm, matrix_mdev->matrix.apm,
				lstdev->matrix.apm, AP_DEVICES))
			continue;

		/* No domain overlap either: likewise no shared APQN. */
		if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
				lstdev->matrix.aqm, AP_DOMAINS))
			continue;

		/* Both masks intersect => at least one shared APQN. */
		return -EADDRINUSE;
	}

	return 0;
}
  231. /**
  232. * assign_adapter_store
  233. *
  234. * @dev: the matrix device
  235. * @attr: the mediated matrix device's assign_adapter attribute
  236. * @buf: a buffer containing the AP adapter number (APID) to
  237. * be assigned
  238. * @count: the number of bytes in @buf
  239. *
  240. * Parses the APID from @buf and sets the corresponding bit in the mediated
  241. * matrix device's APM.
  242. *
  243. * Returns the number of bytes processed if the APID is valid; otherwise,
  244. * returns one of the following errors:
  245. *
  246. * 1. -EINVAL
  247. * The APID is not a valid number
  248. *
  249. * 2. -ENODEV
  250. * The APID exceeds the maximum value configured for the system
  251. *
  252. * 3. -EADDRNOTAVAIL
  253. * An APQN derived from the cross product of the APID being assigned
  254. * and the APQIs previously assigned is not bound to the vfio_ap device
  255. * driver; or, if no APQIs have yet been assigned, the APID is not
  256. * contained in an APQN bound to the vfio_ap device driver.
  257. *
  258. * 4. -EADDRINUSE
  259. * An APQN derived from the cross product of the APID being assigned
  260. * and the APQIs previously assigned is being used by another mediated
  261. * matrix device
  262. */
  263. static ssize_t assign_adapter_store(struct device *dev,
  264. struct device_attribute *attr,
  265. const char *buf, size_t count)
  266. {
  267. int ret;
  268. unsigned long apid;
  269. struct mdev_device *mdev = mdev_from_dev(dev);
  270. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  271. /* If the guest is running, disallow assignment of adapter */
  272. if (matrix_mdev->kvm)
  273. return -EBUSY;
  274. ret = kstrtoul(buf, 0, &apid);
  275. if (ret)
  276. return ret;
  277. if (apid > matrix_mdev->matrix.apm_max)
  278. return -ENODEV;
  279. /*
  280. * Set the bit in the AP mask (APM) corresponding to the AP adapter
  281. * number (APID). The bits in the mask, from most significant to least
  282. * significant bit, correspond to APIDs 0-255.
  283. */
  284. mutex_lock(&matrix_dev->lock);
  285. ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
  286. if (ret)
  287. goto done;
  288. set_bit_inv(apid, matrix_mdev->matrix.apm);
  289. ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
  290. if (ret)
  291. goto share_err;
  292. ret = count;
  293. goto done;
  294. share_err:
  295. clear_bit_inv(apid, matrix_mdev->matrix.apm);
  296. done:
  297. mutex_unlock(&matrix_dev->lock);
  298. return ret;
  299. }
  300. static DEVICE_ATTR_WO(assign_adapter);
  301. /**
  302. * unassign_adapter_store
  303. *
  304. * @dev: the matrix device
  305. * @attr: the mediated matrix device's unassign_adapter attribute
  306. * @buf: a buffer containing the adapter number (APID) to be unassigned
  307. * @count: the number of bytes in @buf
  308. *
  309. * Parses the APID from @buf and clears the corresponding bit in the mediated
  310. * matrix device's APM.
  311. *
  312. * Returns the number of bytes processed if the APID is valid; otherwise,
  313. * returns one of the following errors:
  314. * -EINVAL if the APID is not a number
  315. * -ENODEV if the APID it exceeds the maximum value configured for the
  316. * system
  317. */
  318. static ssize_t unassign_adapter_store(struct device *dev,
  319. struct device_attribute *attr,
  320. const char *buf, size_t count)
  321. {
  322. int ret;
  323. unsigned long apid;
  324. struct mdev_device *mdev = mdev_from_dev(dev);
  325. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  326. /* If the guest is running, disallow un-assignment of adapter */
  327. if (matrix_mdev->kvm)
  328. return -EBUSY;
  329. ret = kstrtoul(buf, 0, &apid);
  330. if (ret)
  331. return ret;
  332. if (apid > matrix_mdev->matrix.apm_max)
  333. return -ENODEV;
  334. mutex_lock(&matrix_dev->lock);
  335. clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
  336. mutex_unlock(&matrix_dev->lock);
  337. return count;
  338. }
  339. DEVICE_ATTR_WO(unassign_adapter);
  340. static int
  341. vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
  342. unsigned long apqi)
  343. {
  344. int ret;
  345. unsigned long apid;
  346. unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
  347. if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
  348. return vfio_ap_verify_queue_reserved(NULL, &apqi);
  349. for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
  350. ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
  351. if (ret)
  352. return ret;
  353. }
  354. return 0;
  355. }
  356. /**
  357. * assign_domain_store
  358. *
  359. * @dev: the matrix device
  360. * @attr: the mediated matrix device's assign_domain attribute
  361. * @buf: a buffer containing the AP queue index (APQI) of the domain to
  362. * be assigned
  363. * @count: the number of bytes in @buf
  364. *
  365. * Parses the APQI from @buf and sets the corresponding bit in the mediated
  366. * matrix device's AQM.
  367. *
  368. * Returns the number of bytes processed if the APQI is valid; otherwise returns
  369. * one of the following errors:
  370. *
  371. * 1. -EINVAL
  372. * The APQI is not a valid number
  373. *
  374. * 2. -ENODEV
  375. * The APQI exceeds the maximum value configured for the system
  376. *
  377. * 3. -EADDRNOTAVAIL
  378. * An APQN derived from the cross product of the APQI being assigned
  379. * and the APIDs previously assigned is not bound to the vfio_ap device
  380. * driver; or, if no APIDs have yet been assigned, the APQI is not
  381. * contained in an APQN bound to the vfio_ap device driver.
  382. *
  383. * 4. -EADDRINUSE
  384. * An APQN derived from the cross product of the APQI being assigned
  385. * and the APIDs previously assigned is being used by another mediated
  386. * matrix device
  387. */
  388. static ssize_t assign_domain_store(struct device *dev,
  389. struct device_attribute *attr,
  390. const char *buf, size_t count)
  391. {
  392. int ret;
  393. unsigned long apqi;
  394. struct mdev_device *mdev = mdev_from_dev(dev);
  395. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  396. unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
  397. /* If the guest is running, disallow assignment of domain */
  398. if (matrix_mdev->kvm)
  399. return -EBUSY;
  400. ret = kstrtoul(buf, 0, &apqi);
  401. if (ret)
  402. return ret;
  403. if (apqi > max_apqi)
  404. return -ENODEV;
  405. mutex_lock(&matrix_dev->lock);
  406. ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
  407. if (ret)
  408. goto done;
  409. set_bit_inv(apqi, matrix_mdev->matrix.aqm);
  410. ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
  411. if (ret)
  412. goto share_err;
  413. ret = count;
  414. goto done;
  415. share_err:
  416. clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
  417. done:
  418. mutex_unlock(&matrix_dev->lock);
  419. return ret;
  420. }
  421. DEVICE_ATTR_WO(assign_domain);
  422. /**
  423. * unassign_domain_store
  424. *
  425. * @dev: the matrix device
  426. * @attr: the mediated matrix device's unassign_domain attribute
  427. * @buf: a buffer containing the AP queue index (APQI) of the domain to
  428. * be unassigned
  429. * @count: the number of bytes in @buf
  430. *
  431. * Parses the APQI from @buf and clears the corresponding bit in the
  432. * mediated matrix device's AQM.
  433. *
  434. * Returns the number of bytes processed if the APQI is valid; otherwise,
  435. * returns one of the following errors:
  436. * -EINVAL if the APQI is not a number
  437. * -ENODEV if the APQI exceeds the maximum value configured for the system
  438. */
  439. static ssize_t unassign_domain_store(struct device *dev,
  440. struct device_attribute *attr,
  441. const char *buf, size_t count)
  442. {
  443. int ret;
  444. unsigned long apqi;
  445. struct mdev_device *mdev = mdev_from_dev(dev);
  446. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  447. /* If the guest is running, disallow un-assignment of domain */
  448. if (matrix_mdev->kvm)
  449. return -EBUSY;
  450. ret = kstrtoul(buf, 0, &apqi);
  451. if (ret)
  452. return ret;
  453. if (apqi > matrix_mdev->matrix.aqm_max)
  454. return -ENODEV;
  455. mutex_lock(&matrix_dev->lock);
  456. clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
  457. mutex_unlock(&matrix_dev->lock);
  458. return count;
  459. }
  460. DEVICE_ATTR_WO(unassign_domain);
  461. /**
  462. * assign_control_domain_store
  463. *
  464. * @dev: the matrix device
  465. * @attr: the mediated matrix device's assign_control_domain attribute
  466. * @buf: a buffer containing the domain ID to be assigned
  467. * @count: the number of bytes in @buf
  468. *
  469. * Parses the domain ID from @buf and sets the corresponding bit in the mediated
  470. * matrix device's ADM.
  471. *
  472. * Returns the number of bytes processed if the domain ID is valid; otherwise,
  473. * returns one of the following errors:
  474. * -EINVAL if the ID is not a number
  475. * -ENODEV if the ID exceeds the maximum value configured for the system
  476. */
  477. static ssize_t assign_control_domain_store(struct device *dev,
  478. struct device_attribute *attr,
  479. const char *buf, size_t count)
  480. {
  481. int ret;
  482. unsigned long id;
  483. struct mdev_device *mdev = mdev_from_dev(dev);
  484. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  485. /* If the guest is running, disallow assignment of control domain */
  486. if (matrix_mdev->kvm)
  487. return -EBUSY;
  488. ret = kstrtoul(buf, 0, &id);
  489. if (ret)
  490. return ret;
  491. if (id > matrix_mdev->matrix.adm_max)
  492. return -ENODEV;
  493. /* Set the bit in the ADM (bitmask) corresponding to the AP control
  494. * domain number (id). The bits in the mask, from most significant to
  495. * least significant, correspond to IDs 0 up to the one less than the
  496. * number of control domains that can be assigned.
  497. */
  498. mutex_lock(&matrix_dev->lock);
  499. set_bit_inv(id, matrix_mdev->matrix.adm);
  500. mutex_unlock(&matrix_dev->lock);
  501. return count;
  502. }
  503. DEVICE_ATTR_WO(assign_control_domain);
  504. /**
  505. * unassign_control_domain_store
  506. *
  507. * @dev: the matrix device
  508. * @attr: the mediated matrix device's unassign_control_domain attribute
  509. * @buf: a buffer containing the domain ID to be unassigned
  510. * @count: the number of bytes in @buf
  511. *
  512. * Parses the domain ID from @buf and clears the corresponding bit in the
  513. * mediated matrix device's ADM.
  514. *
  515. * Returns the number of bytes processed if the domain ID is valid; otherwise,
  516. * returns one of the following errors:
  517. * -EINVAL if the ID is not a number
  518. * -ENODEV if the ID exceeds the maximum value configured for the system
  519. */
  520. static ssize_t unassign_control_domain_store(struct device *dev,
  521. struct device_attribute *attr,
  522. const char *buf, size_t count)
  523. {
  524. int ret;
  525. unsigned long domid;
  526. struct mdev_device *mdev = mdev_from_dev(dev);
  527. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  528. unsigned long max_domid = matrix_mdev->matrix.adm_max;
  529. /* If the guest is running, disallow un-assignment of control domain */
  530. if (matrix_mdev->kvm)
  531. return -EBUSY;
  532. ret = kstrtoul(buf, 0, &domid);
  533. if (ret)
  534. return ret;
  535. if (domid > max_domid)
  536. return -ENODEV;
  537. mutex_lock(&matrix_dev->lock);
  538. clear_bit_inv(domid, matrix_mdev->matrix.adm);
  539. mutex_unlock(&matrix_dev->lock);
  540. return count;
  541. }
  542. DEVICE_ATTR_WO(unassign_control_domain);
  543. static ssize_t control_domains_show(struct device *dev,
  544. struct device_attribute *dev_attr,
  545. char *buf)
  546. {
  547. unsigned long id;
  548. int nchars = 0;
  549. int n;
  550. char *bufpos = buf;
  551. struct mdev_device *mdev = mdev_from_dev(dev);
  552. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  553. unsigned long max_domid = matrix_mdev->matrix.adm_max;
  554. mutex_lock(&matrix_dev->lock);
  555. for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
  556. n = sprintf(bufpos, "%04lx\n", id);
  557. bufpos += n;
  558. nchars += n;
  559. }
  560. mutex_unlock(&matrix_dev->lock);
  561. return nchars;
  562. }
  563. DEVICE_ATTR_RO(control_domains);
  564. static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
  565. char *buf)
  566. {
  567. struct mdev_device *mdev = mdev_from_dev(dev);
  568. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  569. char *bufpos = buf;
  570. unsigned long apid;
  571. unsigned long apqi;
  572. unsigned long apid1;
  573. unsigned long apqi1;
  574. unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
  575. unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
  576. int nchars = 0;
  577. int n;
  578. apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
  579. apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
  580. mutex_lock(&matrix_dev->lock);
  581. if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
  582. for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
  583. for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
  584. naqm_bits) {
  585. n = sprintf(bufpos, "%02lx.%04lx\n", apid,
  586. apqi);
  587. bufpos += n;
  588. nchars += n;
  589. }
  590. }
  591. } else if (apid1 < napm_bits) {
  592. for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
  593. n = sprintf(bufpos, "%02lx.\n", apid);
  594. bufpos += n;
  595. nchars += n;
  596. }
  597. } else if (apqi1 < naqm_bits) {
  598. for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
  599. n = sprintf(bufpos, ".%04lx\n", apqi);
  600. bufpos += n;
  601. nchars += n;
  602. }
  603. }
  604. mutex_unlock(&matrix_dev->lock);
  605. return nchars;
  606. }
  607. DEVICE_ATTR_RO(matrix);
/* Per-mdev sysfs attributes for configuring the guest's AP matrix. */
static struct attribute *vfio_ap_mdev_attrs[] = {
	&dev_attr_assign_adapter.attr,
	&dev_attr_unassign_adapter.attr,
	&dev_attr_assign_domain.attr,
	&dev_attr_unassign_domain.attr,
	&dev_attr_assign_control_domain.attr,
	&dev_attr_unassign_control_domain.attr,
	&dev_attr_control_domains.attr,
	&dev_attr_matrix.attr,
	NULL,
};
/* Group wrapping the per-mdev attributes above. */
static struct attribute_group vfio_ap_mdev_attr_group = {
	.attrs = vfio_ap_mdev_attrs
};
/* NULL-terminated list of per-mdev attribute groups (just one). */
static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
	&vfio_ap_mdev_attr_group,
	NULL
};
/*
 * Copy the mdev's APM/AQM/ADM masks into the guest's CRYCB, selecting the
 * APCB layout that matches the CRYCB format advertised in crycbd.
 * Caller must ensure matrix_mdev->kvm and its crycb are valid.
 */
static void vfio_ap_mdev_copy_masks(struct ap_matrix_mdev *matrix_mdev)
{
	int nbytes;
	unsigned long *apm, *aqm, *adm;
	struct kvm_s390_crypto_cb *crycb = matrix_mdev->kvm->arch.crypto.crycb;

	switch (matrix_mdev->kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2:
		/* Format 2 uses the larger apcb1 masks. */
		apm = (unsigned long *)crycb->apcb1.apm;
		aqm = (unsigned long *)crycb->apcb1.aqm;
		adm = (unsigned long *)crycb->apcb1.adm;
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0:
		/* Formats 0 and 1 share the apcb0 layout. */
		apm = (unsigned long *)crycb->apcb0.apm;
		aqm = (unsigned long *)crycb->apcb0.aqm;
		adm = (unsigned long *)crycb->apcb0.adm;
		break;
	default:
		/* cannot happen */
		return;
	}

	/* Copy only the bytes that cover the configured maxima. */
	nbytes = DIV_ROUND_UP(matrix_mdev->matrix.apm_max + 1, BITS_PER_BYTE);
	memcpy(apm, matrix_mdev->matrix.apm, nbytes);
	nbytes = DIV_ROUND_UP(matrix_mdev->matrix.aqm_max + 1, BITS_PER_BYTE);
	memcpy(aqm, matrix_mdev->matrix.aqm, nbytes);
	nbytes = DIV_ROUND_UP(matrix_mdev->matrix.adm_max + 1, BITS_PER_BYTE);
	memcpy(adm, matrix_mdev->matrix.adm, nbytes);
}
  654. /**
  655. * vfio_ap_mdev_set_kvm
  656. *
  657. * @matrix_mdev: a mediated matrix device
  658. * @kvm: reference to KVM instance
  659. *
  660. * Verifies no other mediated matrix device has @kvm and sets a reference to
  661. * it in @matrix_mdev->kvm.
  662. *
  663. * Return 0 if no other mediated matrix device has a reference to @kvm;
  664. * otherwise, returns an -EPERM.
  665. */
  666. static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
  667. struct kvm *kvm)
  668. {
  669. struct ap_matrix_mdev *m;
  670. mutex_lock(&matrix_dev->lock);
  671. list_for_each_entry(m, &matrix_dev->mdev_list, node) {
  672. if ((m != matrix_mdev) && (m->kvm == kvm)) {
  673. mutex_unlock(&matrix_dev->lock);
  674. return -EPERM;
  675. }
  676. }
  677. matrix_mdev->kvm = kvm;
  678. mutex_unlock(&matrix_dev->lock);
  679. return 0;
  680. }
  681. static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
  682. unsigned long action, void *data)
  683. {
  684. int ret;
  685. struct ap_matrix_mdev *matrix_mdev;
  686. if (action != VFIO_GROUP_NOTIFY_SET_KVM)
  687. return NOTIFY_OK;
  688. matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
  689. if (!data) {
  690. matrix_mdev->kvm = NULL;
  691. return NOTIFY_OK;
  692. }
  693. ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
  694. if (ret)
  695. return NOTIFY_DONE;
  696. /* If there is no CRYCB pointer, then we can't copy the masks */
  697. if (!matrix_mdev->kvm->arch.crypto.crycbd)
  698. return NOTIFY_DONE;
  699. vfio_ap_mdev_copy_masks(matrix_mdev);
  700. return NOTIFY_OK;
  701. }
  702. static int vfio_ap_mdev_open(struct mdev_device *mdev)
  703. {
  704. struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
  705. unsigned long events;
  706. int ret;
  707. if (!try_module_get(THIS_MODULE))
  708. return -ENODEV;
  709. matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
  710. events = VFIO_GROUP_NOTIFY_SET_KVM;
  711. ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
  712. &events, &matrix_mdev->group_notifier);
  713. if (ret) {
  714. module_put(THIS_MODULE);
  715. return ret;
  716. }
  717. return 0;
  718. }
/*
 * Release callback: scrub the guest's crypto masks (if a guest is still
 * attached), unregister the group notifier, and drop the module reference
 * taken in open.
 */
static void vfio_ap_mdev_release(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	if (matrix_mdev->kvm)
		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
				 &matrix_mdev->group_notifier);
	matrix_mdev->kvm = NULL;
	module_put(THIS_MODULE);
}
  729. static int vfio_ap_mdev_get_device_info(unsigned long arg)
  730. {
  731. unsigned long minsz;
  732. struct vfio_device_info info;
  733. minsz = offsetofend(struct vfio_device_info, num_irqs);
  734. if (copy_from_user(&info, (void __user *)arg, minsz))
  735. return -EFAULT;
  736. if (info.argsz < minsz)
  737. return -EINVAL;
  738. info.flags = VFIO_DEVICE_FLAGS_AP;
  739. info.num_regions = 0;
  740. info.num_irqs = 0;
  741. return copy_to_user((void __user *)arg, &info, minsz);
  742. }
  743. static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
  744. unsigned int cmd, unsigned long arg)
  745. {
  746. int ret;
  747. switch (cmd) {
  748. case VFIO_DEVICE_GET_INFO:
  749. ret = vfio_ap_mdev_get_device_info(arg);
  750. break;
  751. default:
  752. ret = -EOPNOTSUPP;
  753. break;
  754. }
  755. return ret;
  756. }
/* mdev parent callbacks wiring up the vfio_ap device lifecycle and ioctl. */
static const struct mdev_parent_ops vfio_ap_matrix_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= vfio_ap_mdev_type_groups,
	.mdev_attr_groups	= vfio_ap_mdev_attr_groups,
	.create			= vfio_ap_mdev_create,
	.remove			= vfio_ap_mdev_remove,
	.open			= vfio_ap_mdev_open,
	.release		= vfio_ap_mdev_release,
	.ioctl			= vfio_ap_mdev_ioctl,
};
/*
 * Register the matrix device with the mdev framework and initialize the
 * instance budget.  Returns 0 on success or a negative errno.
 */
int vfio_ap_mdev_register(void)
{
	atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);

	return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
}
/* Unregister the matrix device from the mdev framework. */
void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_device(&matrix_dev->device);
}