vfio_ccw_ops.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Physical device callbacks for vfio_ccw
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/vfio.h>
#include <linux/mdev.h>

#include "vfio_ccw_private.h"
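
/*
 * Reset the mediated device: quiesce the subchannel so that no I/O is
 * running and no interrupt is pending, then re-enable it. On success the
 * device returns to the IDLE state. Used by the VFIO_DEVICE_RESET ioctl
 * and by the DMA unmap notifier below.
 */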
static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private;
	struct subchannel *sch;
	int ret;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	sch = private->sch;
	/*
	 * TODO:
	 * At the current stage, some things like "no I/O running" and "no
	 * interrupt pending" are clear, but we are not sure what other
	 * state we need to care about.
	 * There are still many more instructions that need to be handled.
	 * We should come back here later.
	 */
	ret = vfio_ccw_sch_quiesce(sch);
	if (ret)
		return ret;

	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (!ret)
		private->state = VFIO_CCW_STATE_IDLE;

	return ret;
}
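
/*
 * IOMMU notifier: on a DMA unmap, drop the translated channel program if
 * it pins the IOVA being invalidated, resetting the device first so the
 * hardware is no longer using those pages.
 */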
static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
				  unsigned long action,
				  void *data)
{
	struct vfio_ccw_private *private =
		container_of(nb, struct vfio_ccw_private, nb);

	/*
	 * Vendor drivers MUST unpin pages in response to an
	 * invalidation.
	 */
	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;

		if (!cp_iova_pinned(&private->cp, unmap->iova))
			return NOTIFY_OK;

		if (vfio_ccw_mdev_reset(private->mdev))
			return NOTIFY_BAD;

		cp_free(&private->cp);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
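
/*
 * sysfs attributes describing the single supported mdev type, "io": the
 * human-readable type name, the device API string that userspace matches
 * against, and the number of instances that can still be created.
 */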
static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "I/O subchannel (Non-QDIO)\n");
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct vfio_ccw_private *private = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", atomic_read(&private->avail));
}
static MDEV_TYPE_ATTR_RO(available_instances);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group = {
	.name  = "io",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group,
	NULL,
};
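
/*
 * create: claim the one available instance for this subchannel and bind
 * the new mdev to the parent's private data, moving the device to IDLE.
 */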
static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if (private->state == VFIO_CCW_STATE_NOT_OPER)
		return -ENODEV;

	if (atomic_dec_if_positive(&private->avail) < 0)
		return -EPERM;

	private->mdev = mdev;
	private->state = VFIO_CCW_STATE_IDLE;

	return 0;
}
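
/*
 * remove: quiesce the subchannel unless it is already inoperative or in
 * standby, then release the instance so a new mdev can be created.
 */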
static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
	    (private->state != VFIO_CCW_STATE_STANDBY)) {
		if (!vfio_ccw_mdev_reset(mdev))
			private->state = VFIO_CCW_STATE_STANDBY;
		/* The state will be NOT_OPER on error. */
	}

	private->mdev = NULL;
	atomic_inc(&private->avail);

	return 0;
}
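
/*
 * open: register the IOMMU notifier so DMA unmaps are seen for as long
 * as userspace holds the device open.
 */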
static int vfio_ccw_mdev_open(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));
	unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;

	private->nb.notifier_call = vfio_ccw_mdev_notifier;

	return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				      &events, &private->nb);
}
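
/* release: drop the IOMMU notifier registered in open. */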
static void vfio_ccw_mdev_release(struct mdev_device *mdev)
{
	struct vfio_ccw_private *private =
		dev_get_drvdata(mdev_parent_dev(mdev));

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
				 &private->nb);
}
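
/*
 * read/write expose the ccw_io_region at offset 0 of the device file:
 * reads return the region contents (e.g. the IRB after an interrupt),
 * while a write stores the request and kicks the FSM with an IO_REQ
 * event, which is only accepted in the IDLE state.
 */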
static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
				  char __user *buf,
				  size_t count,
				  loff_t *ppos)
{
	struct vfio_ccw_private *private;
	struct ccw_io_region *region;

	if (*ppos + count > sizeof(*region))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	region = private->io_region;
	if (copy_to_user(buf, (void *)region + *ppos, count))
		return -EFAULT;

	return count;
}

static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
				   const char __user *buf,
				   size_t count,
				   loff_t *ppos)
{
	struct vfio_ccw_private *private;
	struct ccw_io_region *region;

	if (*ppos + count > sizeof(*region))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	if (private->state != VFIO_CCW_STATE_IDLE)
		return -EACCES;

	region = private->io_region;
	if (copy_from_user((void *)region + *ppos, buf, count))
		return -EFAULT;

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
	if (region->ret_code != 0) {
		private->state = VFIO_CCW_STATE_IDLE;
		return region->ret_code;
	}

	return count;
}
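
/*
 * The helpers below fill in the info structures for the
 * VFIO_DEVICE_GET_*_INFO ioctls: one resettable CCW device with a single
 * region (the I/O region) and a single eventfd-capable IRQ.
 */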
static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info)
{
	info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
	info->num_regions = VFIO_CCW_NUM_REGIONS;
	info->num_irqs = VFIO_CCW_NUM_IRQS;

	return 0;
}

static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
					 u16 *cap_type_id,
					 void **cap_type)
{
	switch (info->index) {
	case VFIO_CCW_CONFIG_REGION_INDEX:
		info->offset = 0;
		info->size = sizeof(struct ccw_io_region);
		info->flags = VFIO_REGION_INFO_FLAG_READ
			      | VFIO_REGION_INFO_FLAG_WRITE;
		return 0;
	default:
		return -EINVAL;
	}
}

static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
	if (info->index != VFIO_CCW_IO_IRQ_INDEX)
		return -EINVAL;

	info->count = 1;
	info->flags = VFIO_IRQ_INFO_EVENTFD;

	return 0;
}
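
/*
 * Handle VFIO_DEVICE_SET_IRQS for the I/O interrupt: DATA_NONE and a
 * true DATA_BOOL manually signal the current eventfd, while
 * DATA_EVENTFD installs a new eventfd context (or removes it when
 * fd == -1).
 */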
static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
				  uint32_t flags,
				  void __user *data)
{
	struct vfio_ccw_private *private;
	struct eventfd_ctx **ctx;

	if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
		return -EINVAL;

	private = dev_get_drvdata(mdev_parent_dev(mdev));
	ctx = &private->io_trigger;

	switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
	{
		if (*ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	}
	case VFIO_IRQ_SET_DATA_BOOL:
	{
		uint8_t trigger;

		if (get_user(trigger, (uint8_t __user *)data))
			return -EFAULT;

		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);

		return 0;
	}
	case VFIO_IRQ_SET_DATA_EVENTFD:
	{
		int32_t fd;

		if (get_user(fd, (int32_t __user *)data))
			return -EFAULT;

		if (fd == -1) {
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx;

			efdctx = eventfd_ctx_fdget(fd);
			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);

			if (*ctx)
				eventfd_ctx_put(*ctx);

			*ctx = efdctx;
		} else
			return -EINVAL;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
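
/*
 * Top-level ioctl dispatcher. A minimal userspace sketch of the expected
 * calling sequence (the fd here is hypothetical, e.g. obtained via
 * VFIO_GROUP_GET_DEVICE_FD):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(fd, VFIO_DEVICE_GET_INFO, &info);
 *	// On success: info.num_regions == VFIO_CCW_NUM_REGIONS and
 *	// info.num_irqs == VFIO_CCW_NUM_IRQS.
 */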
static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
				   unsigned int cmd,
				   unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_device_info(&info);
		if (ret)
			return ret;

		/* copy_to_user() returns uncopied bytes, not an errno. */
		return copy_to_user((void __user *)arg, &info, minsz) ?
		       -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id,
						    &cap_type);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &info, minsz) ?
		       -EFAULT : 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
			return -EINVAL;

		ret = vfio_ccw_mdev_get_irq_info(&info);
		if (ret)
			return ret;

		if (info.count == -1)
			return -EINVAL;

		return copy_to_user((void __user *)arg, &info, minsz) ?
		       -EFAULT : 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		size_t data_size;
		void __user *data;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
							 VFIO_CCW_NUM_IRQS,
							 &data_size);
		if (ret)
			return ret;

		data = (void __user *)(arg + minsz);
		return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data);
	}
	case VFIO_DEVICE_RESET:
		return vfio_ccw_mdev_reset(mdev);
	default:
		return -ENOTTY;
	}
}
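
/* Callbacks the mediated device core invokes on behalf of userspace. */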
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= mdev_type_groups,
	.create			= vfio_ccw_mdev_create,
	.remove			= vfio_ccw_mdev_remove,
	.open			= vfio_ccw_mdev_open,
	.release		= vfio_ccw_mdev_release,
	.read			= vfio_ccw_mdev_read,
	.write			= vfio_ccw_mdev_write,
	.ioctl			= vfio_ccw_mdev_ioctl,
};
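
/* Register/unregister the subchannel as an mdev parent device. */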
int vfio_ccw_mdev_reg(struct subchannel *sch)
{
	return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops);
}

void vfio_ccw_mdev_unreg(struct subchannel *sch)
{
	mdev_unregister_device(&sch->dev);
}